/*
  Trivial Database 2: hash handling
  Copyright (C) Rusty Russell 2010

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 3 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <assert.h>
#include <ccan/hash/hash.h>

static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
                             void *arg)
{
        uint64_t ret;
        /* hash64_stable assumes lower bits are more important; they are a
         * slightly better hash.  We use the upper bits first, so swap them. */
        ret = hash64_stable((const unsigned char *)key, length, seed);
        return (ret >> 32) | (ret << 32);
}

void tdb_hash_init(struct tdb_context *tdb)
{
        tdb->khash = jenkins_hash;
        tdb->hash_priv = NULL;
}

uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
{
        return tdb->khash(ptr, len, tdb->hash_seed, tdb->hash_priv);
}

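/* Re-hash the key of the record at "off": used e.g. when entries are moved
 * into a freshly created subhash and more hash bits are needed. */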
uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
{
        const struct tdb_used_record *r;
        const void *key;
        uint64_t klen, hash;

        r = tdb_access_read(tdb, off, sizeof(*r), true);
        if (TDB_PTR_IS_ERR(r)) {
                /* FIXME */
                return 0;
        }

        klen = rec_key_length(r);
        tdb_access_release(tdb, r);

        key = tdb_access_read(tdb, off + sizeof(*r), klen, false);
        if (TDB_PTR_IS_ERR(key)) {
                return 0;
        }

        hash = tdb_hash(tdb, key, klen);
        tdb_access_release(tdb, key);
        return hash;
}

/* Get bits from a value. */
static uint32_t bits_from(uint64_t val, unsigned start, unsigned num)
{
        assert(num <= 32);
        return (val >> start) & ((1U << num) - 1);
}

/* We take bits from the top: that way we can lock whole sections of the hash
 * by using lock ranges. */
static uint32_t use_bits(struct hash_info *h, unsigned num)
{
        h->hash_used += num;
        return bits_from(h->h, 64 - h->hash_used, num);
}

static tdb_bool_err key_matches(struct tdb_context *tdb,
                                const struct tdb_used_record *rec,
                                tdb_off_t off,
                                const struct tdb_data *key)
{
        tdb_bool_err ret = false;
        const char *rkey;

        if (rec_key_length(rec) != key->dsize) {
                add_stat(tdb, compare_wrong_keylen, 1);
                return ret;
        }

        rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
        if (TDB_PTR_IS_ERR(rkey)) {
                return TDB_PTR_ERR(rkey);
        }
        if (memcmp(rkey, key->dptr, key->dsize) == 0)
                ret = true;
        else
                add_stat(tdb, compare_wrong_keycmp, 1);
        tdb_access_release(tdb, rkey);
        return ret;
}

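/* Cheap checks first: right bucket, right extra hash bits stolen from the
 * offset, right low hash bits stored in the record header; only then do we
 * read and compare the key itself. */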
/* Does entry match? */
static tdb_bool_err match(struct tdb_context *tdb,
                          struct hash_info *h,
                          const struct tdb_data *key,
                          tdb_off_t val,
                          struct tdb_used_record *rec)
{
        tdb_off_t off;
        enum TDB_ERROR ecode;

        add_stat(tdb, compares, 1);
        /* Desired bucket must match. */
        if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK)) {
                add_stat(tdb, compare_wrong_bucket, 1);
                return false;
        }

        /* Top bits of offset == next bits of hash. */
        if (bits_from(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
            != bits_from(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
                         TDB_OFF_UPPER_STEAL_EXTRA)) {
                add_stat(tdb, compare_wrong_offsetbits, 1);
                return false;
        }

        off = val & TDB_OFF_MASK;
        ecode = tdb_read_convert(tdb, off, rec, sizeof(*rec));
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        if ((h->h & ((1 << 11)-1)) != rec_hash(rec)) {
                add_stat(tdb, compare_wrong_rechash, 1);
                return false;
        }

        return key_matches(tdb, rec, off, key);
}

static tdb_off_t hbucket_off(tdb_off_t group_start, unsigned bucket)
{
        return group_start
                + (bucket % (1 << TDB_HASH_GROUP_BITS)) * sizeof(tdb_off_t);
}

bool is_subhash(tdb_off_t val)
{
        return (val >> TDB_OFF_UPPER_STEAL_SUBHASH_BIT) & 1;
}

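/* Map a toplevel group number to the start and size of the slice of the
 * 64-bit hash space it covers, so callers can lock that whole slice. */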
/* FIXME: Guess the depth, don't over-lock! */
static tdb_off_t hlock_range(tdb_off_t group, tdb_off_t *size)
{
        *size = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
        return group << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
}

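/* Slow path: all 64 hash bits are used up, so walk the overflow chain
 * linearly.  Also notes the last empty bucket seen, so a following add
 * knows where the entry could go. */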
static tdb_off_t COLD find_in_chain(struct tdb_context *tdb,
                                    struct tdb_data key,
                                    tdb_off_t chain,
                                    struct hash_info *h,
                                    struct tdb_used_record *rec,
                                    struct traverse_info *tinfo)
{
        tdb_off_t off, next;
        enum TDB_ERROR ecode;

        /* In case nothing is free, we set these to zero. */
        h->home_bucket = h->found_bucket = 0;

        for (off = chain; off; off = next) {
                unsigned int i;

                h->group_start = off;
                ecode = tdb_read_convert(tdb, off, h->group, sizeof(h->group));
                if (ecode != TDB_SUCCESS) {
                        return ecode;
                }

                for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                        tdb_off_t recoff;
                        if (!h->group[i]) {
                                /* Remember this empty bucket. */
                                h->home_bucket = h->found_bucket = i;
                                continue;
                        }

                        /* We can insert extra bits via add_to_hash
                         * empty bucket logic. */
                        recoff = h->group[i] & TDB_OFF_MASK;
                        ecode = tdb_read_convert(tdb, recoff, rec,
                                                 sizeof(*rec));
                        if (ecode != TDB_SUCCESS) {
                                return ecode;
                        }

                        ecode = key_matches(tdb, rec, recoff, &key);
                        if (ecode < 0) {
                                return ecode;
                        }
                        if (ecode == 1) {
                                h->home_bucket = h->found_bucket = i;

                                if (tinfo) {
                                        tinfo->levels[tinfo->num_levels]
                                                .hashtable = off;
                                        tinfo->levels[tinfo->num_levels]
                                                .total_buckets
                                                = 1 << TDB_HASH_GROUP_BITS;
                                        tinfo->levels[tinfo->num_levels].entry
                                                = i;
                                        tinfo->num_levels++;
                                }
                                return recoff;
                        }
                }
                next = tdb_read_off(tdb, off
                                    + offsetof(struct tdb_chain, next));
                if (TDB_OFF_IS_ERR(next)) {
                        return next;
                }
                if (next)
                        next += sizeof(struct tdb_used_record);
        }
        return 0;
}

/* This is the core routine which searches the hashtable for an entry.
 * On error, no locks are held and -ve is returned.
 * Otherwise, hinfo is filled in (and the optional tinfo).
 * If not found, the return value is 0.
 * If found, the return value is the offset, and *rec is the record. */
tdb_off_t find_and_lock(struct tdb_context *tdb,
                        struct tdb_data key,
                        int ltype,
                        struct hash_info *h,
                        struct tdb_used_record *rec,
                        struct traverse_info *tinfo)
{
        uint32_t i, group;
        tdb_off_t hashtable;
        enum TDB_ERROR ecode;

        h->h = tdb_hash(tdb, key.dptr, key.dsize);
        h->hash_used = 0;
        group = use_bits(h, TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
        h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);

        h->hlock_start = hlock_range(group, &h->hlock_range);
        ecode = tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
                                TDB_LOCK_WAIT);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        hashtable = offsetof(struct tdb_header, hashtable);
        if (tinfo) {
                tinfo->toplevel_group = group;
                tinfo->num_levels = 1;
                tinfo->levels[0].entry = 0;
                tinfo->levels[0].hashtable = hashtable
                        + (group << TDB_HASH_GROUP_BITS) * sizeof(tdb_off_t);
                tinfo->levels[0].total_buckets = 1 << TDB_HASH_GROUP_BITS;
        }

        while (h->hash_used <= 64) {
                /* Read in the hash group. */
                h->group_start = hashtable
                        + group * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);

                ecode = tdb_read_convert(tdb, h->group_start, &h->group,
                                         sizeof(h->group));
                if (ecode != TDB_SUCCESS) {
                        goto fail;
                }

                /* Pointer to another hash table?  Go down... */
                if (is_subhash(h->group[h->home_bucket])) {
                        hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
                                + sizeof(struct tdb_used_record);
                        if (tinfo) {
                                /* When we come back, use *next* bucket */
                                tinfo->levels[tinfo->num_levels-1].entry
                                        += h->home_bucket + 1;
                        }
                        group = use_bits(h, TDB_SUBLEVEL_HASH_BITS
                                         - TDB_HASH_GROUP_BITS);
                        h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
                        if (tinfo) {
                                tinfo->levels[tinfo->num_levels].hashtable
                                        = hashtable;
                                tinfo->levels[tinfo->num_levels].total_buckets
                                        = 1 << TDB_SUBLEVEL_HASH_BITS;
                                tinfo->levels[tinfo->num_levels].entry
                                        = group << TDB_HASH_GROUP_BITS;
                                tinfo->num_levels++;
                        }
                        continue;
                }

                /* It's in this group: search (until 0 or all searched) */
                for (i = 0, h->found_bucket = h->home_bucket;
                     i < (1 << TDB_HASH_GROUP_BITS);
                     i++, h->found_bucket = ((h->found_bucket+1)
                                             % (1 << TDB_HASH_GROUP_BITS))) {
                        tdb_bool_err berr;
                        if (is_subhash(h->group[h->found_bucket]))
                                continue;

                        if (!h->group[h->found_bucket])
                                break;

                        berr = match(tdb, h, &key, h->group[h->found_bucket],
                                     rec);
                        if (berr < 0) {
                                ecode = berr;
                                goto fail;
                        }
                        if (berr) {
                                if (tinfo) {
                                        tinfo->levels[tinfo->num_levels-1].entry
                                                += h->found_bucket;
                                }
                                return h->group[h->found_bucket] & TDB_OFF_MASK;
                        }
                }
                /* Didn't find it: h indicates where it would go. */
                return 0;
        }

        return find_in_chain(tdb, key, hashtable, h, rec, tinfo);

fail:
        tdb_unlock_hashes(tdb, h->hlock_start, h->hlock_range, ltype);
        return ecode;
}

/* I wrote a simple test, expanding a hash to 2GB, for the following
 * cases:
 * 1) Expanding all the buckets at once,
 * 2) Expanding the bucket we wanted to place the new entry into,
 * 3) Expanding the most-populated bucket.
 *
 * I measured the worst/average/best density during this process:
 * 1) 3%/16%/30%
 * 2) 4%/20%/38%
 * 3) 6%/22%/41%
 *
 * Since (3) gives the best density, for the moment we expand the busiest
 * bucket.
 */
static unsigned fullest_bucket(struct tdb_context *tdb,
                               const tdb_off_t *group,
                               unsigned new_bucket)
{
        unsigned counts[1 << TDB_HASH_GROUP_BITS] = { 0 };
        unsigned int i, best_bucket;

        /* Count the new entry. */
        counts[new_bucket]++;
        best_bucket = new_bucket;

        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned this_bucket;

                if (is_subhash(group[i]))
                        continue;
                this_bucket = group[i] & TDB_OFF_HASH_GROUP_MASK;
                if (++counts[this_bucket] > counts[best_bucket])
                        best_bucket = this_bucket;
        }

        return best_bucket;
}

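/* Try to place an encoded entry in the group, linear-probing from the
 * wanted bucket; returns false if every bucket is already in use. */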
static bool put_into_group(tdb_off_t *group,
                           unsigned bucket, tdb_off_t encoded)
{
        unsigned int i;

        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned b = (bucket + i) % (1 << TDB_HASH_GROUP_BITS);

                if (group[b] == 0) {
                        group[b] = encoded;
                        return true;
                }
        }
        return false;
}

static void force_into_group(tdb_off_t *group,
                             unsigned bucket, tdb_off_t encoded)
{
        if (!put_into_group(group, bucket, encoded))
                abort();
}

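/* Pack a hash bucket entry: the record offset, the home bucket, and the
 * next few hash bits (so match() can reject most mismatches without
 * reading the record). */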
static tdb_off_t encode_offset(tdb_off_t new_off, struct hash_info *h)
{
        return h->home_bucket
                | new_off
                | ((uint64_t)bits_from(h->h,
                                  64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
                                  TDB_OFF_UPPER_STEAL_EXTRA)
                   << TDB_OFF_HASH_EXTRA_BIT);
}

/* Simply overwrite the hash entry we found before. */
enum TDB_ERROR replace_in_hash(struct tdb_context *tdb,
                               struct hash_info *h,
                               tdb_off_t new_off)
{
        return tdb_write_off(tdb, hbucket_off(h->group_start, h->found_bucket),
                             encode_offset(new_off, h));
}

/* We slot in anywhere that's empty in the chain. */
static enum TDB_ERROR COLD add_to_chain(struct tdb_context *tdb,
                                        tdb_off_t subhash,
                                        tdb_off_t new_off)
{
        tdb_off_t entry;
        enum TDB_ERROR ecode;

        entry = tdb_find_zero_off(tdb, subhash, 1<<TDB_HASH_GROUP_BITS);
        if (TDB_OFF_IS_ERR(entry)) {
                return entry;
        }

        if (entry == 1 << TDB_HASH_GROUP_BITS) {
                tdb_off_t next;

                next = tdb_read_off(tdb, subhash
                                    + offsetof(struct tdb_chain, next));
                if (TDB_OFF_IS_ERR(next)) {
                        return next;
                }

                if (!next) {
                        next = alloc(tdb, 0, sizeof(struct tdb_chain), 0,
                                     TDB_CHAIN_MAGIC, false);
                        if (TDB_OFF_IS_ERR(next))
                                return next;
                        ecode = zero_out(tdb,
                                         next+sizeof(struct tdb_used_record),
                                         sizeof(struct tdb_chain));
                        if (ecode != TDB_SUCCESS) {
                                return ecode;
                        }
                        ecode = tdb_write_off(tdb, subhash
                                              + offsetof(struct tdb_chain,
                                                         next),
                                              next);
                        if (ecode != TDB_SUCCESS) {
                                return ecode;
                        }
                }
                return add_to_chain(tdb, next, new_off);
        }

        return tdb_write_off(tdb, subhash + entry * sizeof(tdb_off_t),
                             new_off);
}

/* Add into a newly created subhash. */
static enum TDB_ERROR add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
                                     unsigned hash_used, tdb_off_t val)
{
        tdb_off_t off = (val & TDB_OFF_MASK), *group;
        struct hash_info h;
        unsigned int gnum;

        h.hash_used = hash_used;

        if (hash_used + TDB_SUBLEVEL_HASH_BITS > 64)
                return add_to_chain(tdb, subhash, off);

        h.h = hash_record(tdb, off);
        gnum = use_bits(&h, TDB_SUBLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS);
        h.group_start = subhash
                + gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
        h.home_bucket = use_bits(&h, TDB_HASH_GROUP_BITS);

        group = tdb_access_write(tdb, h.group_start,
                                 sizeof(*group) << TDB_HASH_GROUP_BITS, true);
        if (TDB_PTR_IS_ERR(group)) {
                return TDB_PTR_ERR(group);
        }
        force_into_group(group, h.home_bucket, encode_offset(off, &h));
        return tdb_access_commit(tdb, group);
}

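/* The group is full: hang a fresh subhash (or a chain, once all 64 hash
 * bits are used) off its fullest bucket and move the displaced entries
 * down into it. */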
static enum TDB_ERROR expand_group(struct tdb_context *tdb, struct hash_info *h)
{
        unsigned bucket, num_vals, i, magic;
        size_t subsize;
        tdb_off_t subhash;
        tdb_off_t vals[1 << TDB_HASH_GROUP_BITS];
        enum TDB_ERROR ecode;

        /* Attach new empty subhash under fullest bucket. */
        bucket = fullest_bucket(tdb, h->group, h->home_bucket);

        if (h->hash_used == 64) {
                add_stat(tdb, alloc_chain, 1);
                subsize = sizeof(struct tdb_chain);
                magic = TDB_CHAIN_MAGIC;
        } else {
                add_stat(tdb, alloc_subhash, 1);
                subsize = (sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS);
                magic = TDB_HTABLE_MAGIC;
        }

        subhash = alloc(tdb, 0, subsize, 0, magic, false);
        if (TDB_OFF_IS_ERR(subhash)) {
                return subhash;
        }

        ecode = zero_out(tdb, subhash + sizeof(struct tdb_used_record),
                         subsize);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        /* Remove any which are destined for bucket or are in wrong place. */
        num_vals = 0;
        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned home_bucket = h->group[i] & TDB_OFF_HASH_GROUP_MASK;
                if (!h->group[i] || is_subhash(h->group[i]))
                        continue;
                if (home_bucket == bucket || home_bucket != i) {
                        vals[num_vals++] = h->group[i];
                        h->group[i] = 0;
                }
        }
        /* FIXME: This assert is valid, but we do this during unit test :( */
        /* assert(num_vals); */

        /* Overwrite expanded bucket with subhash pointer. */
        h->group[bucket] = subhash | (1ULL << TDB_OFF_UPPER_STEAL_SUBHASH_BIT);

        /* Point to actual contents of record. */
        subhash += sizeof(struct tdb_used_record);

        /* Put values back. */
        for (i = 0; i < num_vals; i++) {
                unsigned this_bucket = vals[i] & TDB_OFF_HASH_GROUP_MASK;

                if (this_bucket == bucket) {
                        ecode = add_to_subhash(tdb, subhash, h->hash_used,
                                               vals[i]);
                        if (ecode != TDB_SUCCESS)
                                return ecode;
                } else {
                        /* There should be room to put this back. */
                        force_into_group(h->group, this_bucket, vals[i]);
                }
        }
        return TDB_SUCCESS;
}

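/* Delete the entry at found_bucket, then re-insert any entries after it
 * which were only displaced by linear probing, so future searches still
 * stop at the right empty bucket. */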
enum TDB_ERROR delete_from_hash(struct tdb_context *tdb, struct hash_info *h)
{
        unsigned int i, num_movers = 0;
        tdb_off_t movers[1 << TDB_HASH_GROUP_BITS];

        h->group[h->found_bucket] = 0;
        for (i = 1; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned this_bucket;

                this_bucket = (h->found_bucket+i) % (1 << TDB_HASH_GROUP_BITS);
                /* Empty bucket?  We're done. */
                if (!h->group[this_bucket])
                        break;

                /* Ignore subhashes. */
                if (is_subhash(h->group[this_bucket]))
                        continue;

                /* If this one is not happy where it is, we'll move it. */
                if ((h->group[this_bucket] & TDB_OFF_HASH_GROUP_MASK)
                    != this_bucket) {
                        movers[num_movers++] = h->group[this_bucket];
                        h->group[this_bucket] = 0;
                }
        }

        /* Put back the ones we erased. */
        for (i = 0; i < num_movers; i++) {
                force_into_group(h->group, movers[i] & TDB_OFF_HASH_GROUP_MASK,
                                 movers[i]);
        }

        /* Now we write back the hash group */
        return tdb_write_convert(tdb, h->group_start,
                                 h->group, sizeof(h->group));
}

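/* Add new_off into the hash, using the hash_info filled in by
 * find_and_lock(); this may expand the group into a subhash and recurse. */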
enum TDB_ERROR add_to_hash(struct tdb_context *tdb, struct hash_info *h,
                           tdb_off_t new_off)
{
        enum TDB_ERROR ecode;

        /* We hit an empty bucket during search?  That's where it goes. */
        if (!h->group[h->found_bucket]) {
                h->group[h->found_bucket] = encode_offset(new_off, h);
                /* Write back the modified group. */
                return tdb_write_convert(tdb, h->group_start,
                                         h->group, sizeof(h->group));
        }

        if (h->hash_used > 64)
                return add_to_chain(tdb, h->group_start, new_off);

        /* We're full.  Expand. */
        ecode = expand_group(tdb, h);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        if (is_subhash(h->group[h->home_bucket])) {
                /* We were expanded! */
                tdb_off_t hashtable;
                unsigned int gnum;

                /* Write back the modified group. */
                ecode = tdb_write_convert(tdb, h->group_start, h->group,
                                          sizeof(h->group));
                if (ecode != TDB_SUCCESS) {
                        return ecode;
                }

                /* Move hashinfo down a level. */
                hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
                        + sizeof(struct tdb_used_record);
                gnum = use_bits(h,TDB_SUBLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
                h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
                h->group_start = hashtable
                        + gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
                ecode = tdb_read_convert(tdb, h->group_start, &h->group,
                                         sizeof(h->group));
                if (ecode != TDB_SUCCESS) {
                        return ecode;
                }
        }

        /* Expanding the group must have made room if it didn't choose this
         * bucket. */
        if (put_into_group(h->group, h->home_bucket, encode_offset(new_off,h))){
                return tdb_write_convert(tdb, h->group_start,
                                         h->group, sizeof(h->group));
        }

        /* This can happen if all hashes in group (and us) dropped into same
         * group in subhash. */
        return add_to_hash(tdb, h, new_off);
}

/* Traverse support: returns offset of record, or 0 or -ve error. */
static tdb_off_t iterate_hash(struct tdb_context *tdb,
                              struct traverse_info *tinfo)
{
        tdb_off_t off, val, i;
        struct traverse_level *tlevel;

        tlevel = &tinfo->levels[tinfo->num_levels-1];

again:
        for (i = tdb_find_nonzero_off(tdb, tlevel->hashtable,
                                      tlevel->entry, tlevel->total_buckets);
             i != tlevel->total_buckets;
             i = tdb_find_nonzero_off(tdb, tlevel->hashtable,
                                      i+1, tlevel->total_buckets)) {
                if (TDB_OFF_IS_ERR(i)) {
                        return i;
                }

                val = tdb_read_off(tdb, tlevel->hashtable+sizeof(tdb_off_t)*i);
                if (TDB_OFF_IS_ERR(val)) {
                        return val;
                }

                off = val & TDB_OFF_MASK;

                /* This makes the delete-all-in-traverse case work
                 * (and simplifies our logic a little). */
                if (off == tinfo->prev)
                        continue;

                tlevel->entry = i;

                if (!is_subhash(val)) {
                        /* Found one. */
                        tinfo->prev = off;
                        return off;
                }

                /* When we come back, we want the next one */
                tlevel->entry++;
                tinfo->num_levels++;
                tlevel++;
                tlevel->hashtable = off + sizeof(struct tdb_used_record);
                tlevel->entry = 0;
                /* Next level is a chain? */
                if (unlikely(tinfo->num_levels == TDB_MAX_LEVELS + 1))
                        tlevel->total_buckets = (1 << TDB_HASH_GROUP_BITS);
                else
                        tlevel->total_buckets = (1 << TDB_SUBLEVEL_HASH_BITS);
                goto again;
        }

        /* Nothing there? */
        if (tinfo->num_levels == 1)
                return 0;

        /* Handle chained entries. */
        if (unlikely(tinfo->num_levels == TDB_MAX_LEVELS + 1)) {
                tlevel->hashtable = tdb_read_off(tdb, tlevel->hashtable
                                                 + offsetof(struct tdb_chain,
                                                            next));
                if (TDB_OFF_IS_ERR(tlevel->hashtable)) {
                        return tlevel->hashtable;
                }
                if (tlevel->hashtable) {
                        tlevel->hashtable += sizeof(struct tdb_used_record);
                        tlevel->entry = 0;
                        goto again;
                }
        }

        /* Go back up and keep searching. */
        tinfo->num_levels--;
        tlevel--;
        goto again;
}

/* Return success if we find something, TDB_ERR_NOEXIST if none. */
enum TDB_ERROR next_in_hash(struct tdb_context *tdb,
                            struct traverse_info *tinfo,
                            TDB_DATA *kbuf, size_t *dlen)
{
        const unsigned group_bits = TDB_TOPLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS;
        tdb_off_t hl_start, hl_range, off;
        enum TDB_ERROR ecode;

        while (tinfo->toplevel_group < (1 << group_bits)) {
                hl_start = (tdb_off_t)tinfo->toplevel_group
                        << (64 - group_bits);
                hl_range = 1ULL << group_bits;
                ecode = tdb_lock_hashes(tdb, hl_start, hl_range, F_RDLCK,
                                        TDB_LOCK_WAIT);
                if (ecode != TDB_SUCCESS) {
                        return ecode;
                }

                off = iterate_hash(tdb, tinfo);
                if (off) {
                        struct tdb_used_record rec;

                        if (TDB_OFF_IS_ERR(off)) {
                                ecode = off;
                                goto fail;
                        }

                        ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
                        if (ecode != TDB_SUCCESS) {
                                goto fail;
                        }
                        if (rec_magic(&rec) != TDB_USED_MAGIC) {
                                ecode = tdb_logerr(tdb, TDB_ERR_CORRUPT,
                                                   TDB_LOG_ERROR,
                                                   "next_in_hash:"
                                                   " corrupt record at %llu",
                                                   (long long)off);
                                goto fail;
                        }

                        kbuf->dsize = rec_key_length(&rec);

                        /* They want data as well? */
                        if (dlen) {
                                *dlen = rec_data_length(&rec);
                                kbuf->dptr = tdb_alloc_read(tdb,
                                                            off + sizeof(rec),
                                                            kbuf->dsize
                                                            + *dlen);
                        } else {
                                kbuf->dptr = tdb_alloc_read(tdb,
                                                            off + sizeof(rec),
                                                            kbuf->dsize);
                        }
                        tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK);
                        if (TDB_PTR_IS_ERR(kbuf->dptr)) {
                                return TDB_PTR_ERR(kbuf->dptr);
                        }
                        return TDB_SUCCESS;
                }

                tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK);

                tinfo->toplevel_group++;
                tinfo->levels[0].hashtable
                        += (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
                tinfo->levels[0].entry = 0;
        }
        return TDB_ERR_NOEXIST;

fail:
        tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK);
        return ecode;
}

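/* Start a traversal: point tinfo at the first toplevel group and let
 * next_in_hash() do the walking. */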
enum TDB_ERROR first_in_hash(struct tdb_context *tdb,
                             struct traverse_info *tinfo,
                             TDB_DATA *kbuf, size_t *dlen)
{
        tinfo->prev = 0;
        tinfo->toplevel_group = 0;
        tinfo->num_levels = 1;
        tinfo->levels[0].hashtable = offsetof(struct tdb_header, hashtable);
        tinfo->levels[0].entry = 0;
        tinfo->levels[0].total_buckets = (1 << TDB_HASH_GROUP_BITS);

        return next_in_hash(tdb, tinfo, kbuf, dlen);
}

/* Even if the entry isn't in this hash bucket, you'd have to lock this
 * bucket to find it. */
static enum TDB_ERROR chainlock(struct tdb_context *tdb, const TDB_DATA *key,
                                int ltype, enum tdb_lock_flags waitflag,
                                const char *func)
{
        enum TDB_ERROR ecode;
        uint64_t h = tdb_hash(tdb, key->dptr, key->dsize);
        tdb_off_t lockstart, locksize;
        unsigned int group, gbits;

        gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
        group = bits_from(h, 64 - gbits, gbits);

        lockstart = hlock_range(group, &locksize);

        ecode = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag);
        tdb_trace_1rec(tdb, func, *key);
        return ecode;
}

/* lock/unlock one hash chain.  This is meant to be used to reduce
 * contention - it cannot guarantee how many records will be locked. */
enum TDB_ERROR tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
{
        return chainlock(tdb, &key, F_WRLCK, TDB_LOCK_WAIT, "tdb_chainlock");
}

enum TDB_ERROR tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
{
        uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
        tdb_off_t lockstart, locksize;
        unsigned int group, gbits;

        gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
        group = bits_from(h, 64 - gbits, gbits);

        lockstart = hlock_range(group, &locksize);

        tdb_trace_1rec(tdb, "tdb_chainunlock", key);
        return tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK);
}