/*
  Trivial Database 2: hash handling
  Copyright (C) Rusty Russell 2010

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 3 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <assert.h>
#include <ccan/hash/hash.h>

static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
                             void *arg)
{
        uint64_t ret;
        /* hash64_stable assumes lower bits are more important; they are a
         * slightly better hash.  We use the upper bits first, so swap them. */
        ret = hash64_stable((const unsigned char *)key, length, seed);
        return (ret >> 32) | (ret << 32);
}

void tdb_hash_init(struct tdb_context *tdb)
{
        tdb->khash = jenkins_hash;
        tdb->hash_priv = NULL;
}

uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
{
        return tdb->khash(ptr, len, tdb->hash_seed, tdb->hash_priv);
}

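/* Hash the key of the on-disk record at 'off': read the used-record header
 * for the key length, then hash the key bytes which follow the header. */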
uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
{
        const struct tdb_used_record *r;
        const void *key;
        uint64_t klen, hash;

        r = tdb_access_read(tdb, off, sizeof(*r), true);
        if (TDB_PTR_IS_ERR(r)) {
                tdb->ecode = TDB_PTR_ERR(r);
                /* FIXME */
                return 0;
        }

        klen = rec_key_length(r);
        tdb_access_release(tdb, r);

        key = tdb_access_read(tdb, off + sizeof(*r), klen, false);
        if (TDB_PTR_IS_ERR(key)) {
                tdb->ecode = TDB_PTR_ERR(key);
                return 0;
        }

        hash = tdb_hash(tdb, key, klen);
        tdb_access_release(tdb, key);
        return hash;
}

/* Get bits from a value. */
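/* For example, bits_from(0x1234, 4, 8) == 0x23. */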
static uint32_t bits_from(uint64_t val, unsigned start, unsigned num)
{
        assert(num <= 32);
        return (val >> start) & ((1U << num) - 1);
}

/* We take bits from the top: that way we can lock whole sections of the hash
 * by using lock ranges. */
static uint32_t use_bits(struct hash_info *h, unsigned num)
{
        h->hash_used += num;
        return bits_from(h->h, 64 - h->hash_used, num);
}

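/* Compare the key stored just past the record header at 'off' against 'key',
 * updating the compare-failure statistics as we go. */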
static bool key_matches(struct tdb_context *tdb,
                        const struct tdb_used_record *rec,
                        tdb_off_t off,
                        const struct tdb_data *key)
{
        bool ret = false;
        const char *rkey;

        if (rec_key_length(rec) != key->dsize) {
                add_stat(tdb, compare_wrong_keylen, 1);
                return ret;
        }

        rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
        if (TDB_PTR_IS_ERR(rkey)) {
                tdb->ecode = TDB_PTR_ERR(rkey);
                return ret;
        }
        if (memcmp(rkey, key->dptr, key->dsize) == 0)
                ret = true;
        else
                add_stat(tdb, compare_wrong_keycmp, 1);
        tdb_access_release(tdb, rkey);
        return ret;
}

/* Does entry match? */
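/* Cheapest checks first: the home bucket bits, then the extra hash bits
 * stolen from the offset, then the hash bits stored in the record header,
 * and only then the key itself. */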
static bool match(struct tdb_context *tdb,
                  struct hash_info *h,
                  const struct tdb_data *key,
                  tdb_off_t val,
                  struct tdb_used_record *rec)
{
        tdb_off_t off;
        enum TDB_ERROR ecode;

        add_stat(tdb, compares, 1);
        /* Desired bucket must match. */
        if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK)) {
                add_stat(tdb, compare_wrong_bucket, 1);
                return false;
        }

        /* Top bits of offset == next bits of hash. */
        if (bits_from(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
            != bits_from(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
                    TDB_OFF_UPPER_STEAL_EXTRA)) {
                add_stat(tdb, compare_wrong_offsetbits, 1);
                return false;
        }

        off = val & TDB_OFF_MASK;
        ecode = tdb_read_convert(tdb, off, rec, sizeof(*rec));
        if (ecode != TDB_SUCCESS) {
                tdb->ecode = ecode;
                return false;
        }

        if ((h->h & ((1 << 11)-1)) != rec_hash(rec)) {
                add_stat(tdb, compare_wrong_rechash, 1);
                return false;
        }

        return key_matches(tdb, rec, off, key);
}

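/* Offset of bucket 'bucket' within the hash group starting at group_start. */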
static tdb_off_t hbucket_off(tdb_off_t group_start, unsigned bucket)
{
        return group_start
                + (bucket % (1 << TDB_HASH_GROUP_BITS)) * sizeof(tdb_off_t);
}

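/* Does this bucket entry point at another hash table rather than a record? */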
bool is_subhash(tdb_off_t val)
{
        return (val >> TDB_OFF_UPPER_STEAL_SUBHASH_BIT) & 1;
}

/* FIXME: Guess the depth, don't over-lock! */
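/* Lock range covering one top-level group's slice of the 64-bit hash space. */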
static tdb_off_t hlock_range(tdb_off_t group, tdb_off_t *size)
{
        *size = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
        return group << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
}

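/* Slow path: all 64 hash bits are used up, so entries overflow into a linked
 * list of chain groups which we search linearly. */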
static tdb_off_t COLD find_in_chain(struct tdb_context *tdb,
                                    struct tdb_data key,
                                    tdb_off_t chain,
                                    struct hash_info *h,
                                    struct tdb_used_record *rec,
                                    struct traverse_info *tinfo)
{
        tdb_off_t off, next;
        enum TDB_ERROR ecode;

        /* In case nothing is free, we set these to zero. */
        h->home_bucket = h->found_bucket = 0;

        for (off = chain; off; off = next) {
                unsigned int i;

                h->group_start = off;
                ecode = tdb_read_convert(tdb, off, h->group, sizeof(h->group));
                if (ecode != TDB_SUCCESS) {
                        tdb->ecode = ecode;
                        return TDB_OFF_ERR;
                }

                for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                        tdb_off_t recoff;
                        if (!h->group[i]) {
                                /* Remember this empty bucket. */
                                h->home_bucket = h->found_bucket = i;
                                continue;
                        }

                        /* We can insert extra bits via add_to_hash
                         * empty bucket logic. */
                        recoff = h->group[i] & TDB_OFF_MASK;
                        ecode = tdb_read_convert(tdb, recoff, rec,
                                                 sizeof(*rec));
                        if (ecode != TDB_SUCCESS) {
                                tdb->ecode = ecode;
                                return TDB_OFF_ERR;
                        }

                        if (key_matches(tdb, rec, recoff, &key)) {
                                h->home_bucket = h->found_bucket = i;

                                if (tinfo) {
                                        tinfo->levels[tinfo->num_levels]
                                                .hashtable = off;
                                        tinfo->levels[tinfo->num_levels]
                                                .total_buckets
                                                = 1 << TDB_HASH_GROUP_BITS;
                                        tinfo->levels[tinfo->num_levels].entry
                                                = i;
                                        tinfo->num_levels++;
                                }
                                return recoff;
                        }
                }
                next = tdb_read_off(tdb, off
                                    + offsetof(struct tdb_chain, next));
                if (next == TDB_OFF_ERR)
                        return TDB_OFF_ERR;
                if (next)
                        next += sizeof(struct tdb_used_record);
        }
        return 0;
}

/* This is the core routine which searches the hashtable for an entry.
 * On error, no locks are held and TDB_OFF_ERR is returned.
 * Otherwise, hinfo is filled in (and the optional tinfo).
 * If not found, the return value is 0.
 * If found, the return value is the offset, and *rec is the record. */
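/* A typical caller (sketch, not taken from this file):
 *
 *     off = find_and_lock(tdb, key, F_RDLCK, &h, &rec, NULL);
 *     if (off != TDB_OFF_ERR) {
 *             if (off)
 *                     ... use the record at off ...
 *             tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_RDLCK);
 *     }
 */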
tdb_off_t find_and_lock(struct tdb_context *tdb,
                        struct tdb_data key,
                        int ltype,
                        struct hash_info *h,
                        struct tdb_used_record *rec,
                        struct traverse_info *tinfo)
{
        uint32_t i, group;
        tdb_off_t hashtable;
        enum TDB_ERROR ecode;

        h->h = tdb_hash(tdb, key.dptr, key.dsize);
        h->hash_used = 0;
        group = use_bits(h, TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
        h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);

        h->hlock_start = hlock_range(group, &h->hlock_range);
        ecode = tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
                                TDB_LOCK_WAIT);
        if (ecode != TDB_SUCCESS) {
                tdb->ecode = ecode;
                return TDB_OFF_ERR;
        }

        hashtable = offsetof(struct tdb_header, hashtable);
        if (tinfo) {
                tinfo->toplevel_group = group;
                tinfo->num_levels = 1;
                tinfo->levels[0].entry = 0;
                tinfo->levels[0].hashtable = hashtable
                        + (group << TDB_HASH_GROUP_BITS) * sizeof(tdb_off_t);
                tinfo->levels[0].total_buckets = 1 << TDB_HASH_GROUP_BITS;
        }

        while (h->hash_used <= 64) {
                /* Read in the hash group. */
                h->group_start = hashtable
                        + group * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);

                ecode = tdb_read_convert(tdb, h->group_start, &h->group,
                                         sizeof(h->group));
                if (ecode != TDB_SUCCESS) {
                        tdb->ecode = ecode;
                        goto fail;
                }

                /* Pointer to another hash table?  Go down... */
                if (is_subhash(h->group[h->home_bucket])) {
                        hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
                                + sizeof(struct tdb_used_record);
                        if (tinfo) {
                                /* When we come back, use *next* bucket */
                                tinfo->levels[tinfo->num_levels-1].entry
                                        += h->home_bucket + 1;
                        }
                        group = use_bits(h, TDB_SUBLEVEL_HASH_BITS
                                         - TDB_HASH_GROUP_BITS);
                        h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
                        if (tinfo) {
                                tinfo->levels[tinfo->num_levels].hashtable
                                        = hashtable;
                                tinfo->levels[tinfo->num_levels].total_buckets
                                        = 1 << TDB_SUBLEVEL_HASH_BITS;
                                tinfo->levels[tinfo->num_levels].entry
                                        = group << TDB_HASH_GROUP_BITS;
                                tinfo->num_levels++;
                        }
                        continue;
                }

                /* It's in this group: search (until 0 or all searched) */
                for (i = 0, h->found_bucket = h->home_bucket;
                     i < (1 << TDB_HASH_GROUP_BITS);
                     i++, h->found_bucket = ((h->found_bucket+1)
                                             % (1 << TDB_HASH_GROUP_BITS))) {
                        if (is_subhash(h->group[h->found_bucket]))
                                continue;

                        if (!h->group[h->found_bucket])
                                break;

                        if (match(tdb, h, &key, h->group[h->found_bucket],
                                  rec)) {
                                if (tinfo) {
                                        tinfo->levels[tinfo->num_levels-1].entry
                                                += h->found_bucket;
                                }
                                return h->group[h->found_bucket] & TDB_OFF_MASK;
                        }
                }
                /* Didn't find it: h indicates where it would go. */
                return 0;
        }

        return find_in_chain(tdb, key, hashtable, h, rec, tinfo);

fail:
        tdb_unlock_hashes(tdb, h->hlock_start, h->hlock_range, ltype);
        return TDB_OFF_ERR;
}

/* I wrote a simple test, expanding a hash to 2GB, for the following
 * cases:
 * 1) Expanding all the buckets at once,
 * 2) Expanding the bucket we wanted to place the new entry into,
 * 3) Expanding the most-populated bucket.
 *
 * I measured the worst/average/best density during this process:
 * 1) 3%/16%/30%
 * 2) 4%/20%/38%
 * 3) 6%/22%/41%
 *
 * So, for now, we pick the busiest bucket to expand.
 */
static unsigned fullest_bucket(struct tdb_context *tdb,
                               const tdb_off_t *group,
                               unsigned new_bucket)
{
        unsigned counts[1 << TDB_HASH_GROUP_BITS] = { 0 };
        unsigned int i, best_bucket;

        /* Count the new entry. */
        counts[new_bucket]++;
        best_bucket = new_bucket;

        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned this_bucket;

                if (is_subhash(group[i]))
                        continue;
                this_bucket = group[i] & TDB_OFF_HASH_GROUP_MASK;
                if (++counts[this_bucket] > counts[best_bucket])
                        best_bucket = this_bucket;
        }

        return best_bucket;
}

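/* Place 'encoded' in the first empty bucket at or after 'bucket', wrapping
 * around the group; returns false if the group is completely full. */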
static bool put_into_group(tdb_off_t *group,
                           unsigned bucket, tdb_off_t encoded)
{
        unsigned int i;

        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned b = (bucket + i) % (1 << TDB_HASH_GROUP_BITS);

                if (group[b] == 0) {
                        group[b] = encoded;
                        return true;
                }
        }
        return false;
}

static void force_into_group(tdb_off_t *group,
                             unsigned bucket, tdb_off_t encoded)
{
        if (!put_into_group(group, bucket, encoded))
                abort();
}

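/* A bucket entry encodes the record offset, the home bucket number, and a
 * few extra hash bits stolen from the otherwise-unused top of the offset. */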
static tdb_off_t encode_offset(tdb_off_t new_off, struct hash_info *h)
{
        return h->home_bucket
                | new_off
                | ((uint64_t)bits_from(h->h,
                                  64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
                                  TDB_OFF_UPPER_STEAL_EXTRA)
                   << TDB_OFF_HASH_EXTRA_BIT);
}

/* Simply overwrite the hash entry we found before. */
int replace_in_hash(struct tdb_context *tdb,
                    struct hash_info *h,
                    tdb_off_t new_off)
{
        enum TDB_ERROR ecode;

        ecode = tdb_write_off(tdb, hbucket_off(h->group_start, h->found_bucket),
                              encode_offset(new_off, h));
        if (ecode != TDB_SUCCESS) {
                tdb->ecode = ecode;
                return -1;
        }
        return 0;
}

/* We slot in anywhere that's empty in the chain. */
static int COLD add_to_chain(struct tdb_context *tdb,
                             tdb_off_t subhash,
                             tdb_off_t new_off)
{
        size_t entry = tdb_find_zero_off(tdb, subhash, 1<<TDB_HASH_GROUP_BITS);
        enum TDB_ERROR ecode;

        if (entry == 1 << TDB_HASH_GROUP_BITS) {
                tdb_off_t next;

                next = tdb_read_off(tdb, subhash
                                    + offsetof(struct tdb_chain, next));
                if (next == TDB_OFF_ERR)
                        return -1;

                if (!next) {
                        next = alloc(tdb, 0, sizeof(struct tdb_chain), 0,
                                     TDB_CHAIN_MAGIC, false);
                        if (next == TDB_OFF_ERR)
                                return -1;
                        ecode = zero_out(tdb,
                                         next+sizeof(struct tdb_used_record),
                                         sizeof(struct tdb_chain));
                        if (ecode != TDB_SUCCESS) {
                                tdb->ecode = ecode;
                                return -1;
                        }
                        ecode = tdb_write_off(tdb, subhash
                                              + offsetof(struct tdb_chain,
                                                         next),
                                              next);
                        if (ecode != TDB_SUCCESS) {
                                tdb->ecode = ecode;
                                return -1;
                        }
                }
                return add_to_chain(tdb, next, new_off);
        }

        ecode = tdb_write_off(tdb, subhash + entry * sizeof(tdb_off_t),
                              new_off);
        if (ecode != TDB_SUCCESS) {
                tdb->ecode = ecode;
                return -1;
        }
        return 0;
}

/* Add into a newly created subhash. */
static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
                          unsigned hash_used, tdb_off_t val)
{
        tdb_off_t off = (val & TDB_OFF_MASK), *group;
        struct hash_info h;
        unsigned int gnum;
        enum TDB_ERROR ecode;

        h.hash_used = hash_used;

        if (hash_used + TDB_SUBLEVEL_HASH_BITS > 64)
                return add_to_chain(tdb, subhash, off);

        h.h = hash_record(tdb, off);
        gnum = use_bits(&h, TDB_SUBLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS);
        h.group_start = subhash
                + gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
        h.home_bucket = use_bits(&h, TDB_HASH_GROUP_BITS);

        group = tdb_access_write(tdb, h.group_start,
                                 sizeof(*group) << TDB_HASH_GROUP_BITS, true);
        if (TDB_PTR_IS_ERR(group)) {
                tdb->ecode = TDB_PTR_ERR(group);
                return -1;
        }
        force_into_group(group, h.home_bucket, encode_offset(off, &h));
        ecode = tdb_access_commit(tdb, group);
        if (ecode != TDB_SUCCESS) {
                tdb->ecode = ecode;
                return -1;
        }
        return 0;
}

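/* The group is full: hang a new subhash (or a chain once the hash bits are
 * exhausted) off the fullest bucket and move the displaced entries into it. */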
static int expand_group(struct tdb_context *tdb, struct hash_info *h)
{
        unsigned bucket, num_vals, i, magic;
        size_t subsize;
        tdb_off_t subhash;
        tdb_off_t vals[1 << TDB_HASH_GROUP_BITS];
        enum TDB_ERROR ecode;

        /* Attach new empty subhash under fullest bucket. */
        bucket = fullest_bucket(tdb, h->group, h->home_bucket);

        if (h->hash_used == 64) {
                add_stat(tdb, alloc_chain, 1);
                subsize = sizeof(struct tdb_chain);
                magic = TDB_CHAIN_MAGIC;
        } else {
                add_stat(tdb, alloc_subhash, 1);
                subsize = (sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS);
                magic = TDB_HTABLE_MAGIC;
        }

        subhash = alloc(tdb, 0, subsize, 0, magic, false);
        if (subhash == TDB_OFF_ERR)
                return -1;

        ecode = zero_out(tdb, subhash + sizeof(struct tdb_used_record),
                         subsize);
        if (ecode != TDB_SUCCESS) {
                tdb->ecode = ecode;
                return -1;
        }

        /* Remove any which are destined for bucket or are in wrong place. */
        num_vals = 0;
        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned home_bucket = h->group[i] & TDB_OFF_HASH_GROUP_MASK;
                if (!h->group[i] || is_subhash(h->group[i]))
                        continue;
                if (home_bucket == bucket || home_bucket != i) {
                        vals[num_vals++] = h->group[i];
                        h->group[i] = 0;
                }
        }
        /* FIXME: This assert is valid, but we do this during unit test :( */
        /* assert(num_vals); */

        /* Overwrite expanded bucket with subhash pointer. */
        h->group[bucket] = subhash | (1ULL << TDB_OFF_UPPER_STEAL_SUBHASH_BIT);

        /* Point to actual contents of record. */
        subhash += sizeof(struct tdb_used_record);

        /* Put values back. */
        for (i = 0; i < num_vals; i++) {
                unsigned this_bucket = vals[i] & TDB_OFF_HASH_GROUP_MASK;

                if (this_bucket == bucket) {
                        if (add_to_subhash(tdb, subhash, h->hash_used, vals[i]))
                                return -1;
                } else {
                        /* There should be room to put this back. */
                        force_into_group(h->group, this_bucket, vals[i]);
                }
        }
        return 0;
}

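/* Remove the entry we found earlier, then re-pack the entries which follow it
 * so that the linear probing in find_and_lock still reaches them. */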
int delete_from_hash(struct tdb_context *tdb, struct hash_info *h)
{
        unsigned int i, num_movers = 0;
        tdb_off_t movers[1 << TDB_HASH_GROUP_BITS];
        enum TDB_ERROR ecode;

        h->group[h->found_bucket] = 0;
        for (i = 1; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned this_bucket;

                this_bucket = (h->found_bucket+i) % (1 << TDB_HASH_GROUP_BITS);
                /* Empty bucket?  We're done. */
                if (!h->group[this_bucket])
                        break;

                /* Ignore subhashes. */
                if (is_subhash(h->group[this_bucket]))
                        continue;

                /* If this one is not happy where it is, we'll move it. */
                if ((h->group[this_bucket] & TDB_OFF_HASH_GROUP_MASK)
                    != this_bucket) {
                        movers[num_movers++] = h->group[this_bucket];
                        h->group[this_bucket] = 0;
                }
        }

        /* Put back the ones we erased. */
        for (i = 0; i < num_movers; i++) {
                force_into_group(h->group, movers[i] & TDB_OFF_HASH_GROUP_MASK,
                                 movers[i]);
        }

        /* Now we write back the hash group */
        ecode = tdb_write_convert(tdb, h->group_start,
                                  h->group, sizeof(h->group));
        if (ecode != TDB_SUCCESS) {
                tdb->ecode = ecode;
                return -1;
        }
        return 0;
}

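/* Add new_off under the hash described by h: use the empty bucket found
 * during the search, or expand the group and descend into the new subhash. */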
int add_to_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off)
{
        enum TDB_ERROR ecode;

        /* We hit an empty bucket during search?  That's where it goes. */
        if (!h->group[h->found_bucket]) {
                h->group[h->found_bucket] = encode_offset(new_off, h);
                /* Write back the modified group. */
                ecode = tdb_write_convert(tdb, h->group_start,
                                          h->group, sizeof(h->group));
                if (ecode != TDB_SUCCESS) {
                        tdb->ecode = ecode;
                        return -1;
                }
                return 0;
        }

        if (h->hash_used > 64)
                return add_to_chain(tdb, h->group_start, new_off);

        /* We're full.  Expand. */
        if (expand_group(tdb, h) == -1)
                return -1;

        if (is_subhash(h->group[h->home_bucket])) {
                /* We were expanded! */
                tdb_off_t hashtable;
                unsigned int gnum;

                /* Write back the modified group. */
                ecode = tdb_write_convert(tdb, h->group_start, h->group,
                                          sizeof(h->group));
                if (ecode != TDB_SUCCESS) {
                        tdb->ecode = ecode;
                        return -1;
                }

                /* Move hashinfo down a level. */
                hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
                        + sizeof(struct tdb_used_record);
                gnum = use_bits(h,TDB_SUBLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
                h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
                h->group_start = hashtable
                        + gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
                ecode = tdb_read_convert(tdb, h->group_start, &h->group,
                                         sizeof(h->group));
                if (ecode != TDB_SUCCESS) {
                        tdb->ecode = ecode;
                        return -1;
                }
        }

        /* Expanding the group must have made room if it didn't choose this
         * bucket. */
        if (put_into_group(h->group, h->home_bucket, encode_offset(new_off,h))){
                ecode = tdb_write_convert(tdb, h->group_start,
                                          h->group, sizeof(h->group));
                if (ecode != TDB_SUCCESS) {
                        tdb->ecode = ecode;
                        return -1;
                }
                return 0;
        }

        /* This can happen if all hashes in group (and us) dropped into same
         * group in subhash. */
        return add_to_hash(tdb, h, new_off);
}

/* Traverse support: returns offset of record, or 0 or TDB_OFF_ERR. */
static tdb_off_t iterate_hash(struct tdb_context *tdb,
                              struct traverse_info *tinfo)
{
        tdb_off_t off, val;
        unsigned int i;
        struct traverse_level *tlevel;

        tlevel = &tinfo->levels[tinfo->num_levels-1];

again:
        for (i = tdb_find_nonzero_off(tdb, tlevel->hashtable,
                                      tlevel->entry, tlevel->total_buckets);
             i != tlevel->total_buckets;
             i = tdb_find_nonzero_off(tdb, tlevel->hashtable,
                                      i+1, tlevel->total_buckets)) {
                val = tdb_read_off(tdb, tlevel->hashtable+sizeof(tdb_off_t)*i);
                if (unlikely(val == TDB_OFF_ERR))
                        return TDB_OFF_ERR;

                off = val & TDB_OFF_MASK;

                /* This makes the delete-all-in-traverse case work
                 * (and simplifies our logic a little). */
                if (off == tinfo->prev)
                        continue;

                tlevel->entry = i;

                if (!is_subhash(val)) {
                        /* Found one. */
                        tinfo->prev = off;
                        return off;
                }

                /* When we come back, we want the next one */
                tlevel->entry++;
                tinfo->num_levels++;
                tlevel++;
                tlevel->hashtable = off + sizeof(struct tdb_used_record);
                tlevel->entry = 0;
                /* Next level is a chain? */
                if (unlikely(tinfo->num_levels == TDB_MAX_LEVELS + 1))
                        tlevel->total_buckets = (1 << TDB_HASH_GROUP_BITS);
                else
                        tlevel->total_buckets = (1 << TDB_SUBLEVEL_HASH_BITS);
                goto again;
        }

        /* Nothing there? */
        if (tinfo->num_levels == 1)
                return 0;

        /* Handle chained entries. */
        if (unlikely(tinfo->num_levels == TDB_MAX_LEVELS + 1)) {
                tlevel->hashtable = tdb_read_off(tdb, tlevel->hashtable
                                                 + offsetof(struct tdb_chain,
                                                            next));
                if (tlevel->hashtable == TDB_OFF_ERR)
                        return TDB_OFF_ERR;
                if (tlevel->hashtable) {
                        tlevel->hashtable += sizeof(struct tdb_used_record);
                        tlevel->entry = 0;
                        goto again;
                }
        }

        /* Go back up and keep searching. */
        tinfo->num_levels--;
        tlevel--;
        goto again;
}

/* Return 1 if we find something, 0 if not, -1 on error. */
int next_in_hash(struct tdb_context *tdb,
                 struct traverse_info *tinfo,
                 TDB_DATA *kbuf, size_t *dlen)
{
        const unsigned group_bits = TDB_TOPLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS;
        tdb_off_t hl_start, hl_range, off;
        enum TDB_ERROR ecode;

        while (tinfo->toplevel_group < (1 << group_bits)) {
                hl_start = (tdb_off_t)tinfo->toplevel_group
                        << (64 - group_bits);
                hl_range = 1ULL << group_bits;
                ecode = tdb_lock_hashes(tdb, hl_start, hl_range, F_RDLCK,
                                        TDB_LOCK_WAIT);
                if (ecode != TDB_SUCCESS) {
                        tdb->ecode = ecode;
                        return -1;
                }

                off = iterate_hash(tdb, tinfo);
                if (off) {
                        struct tdb_used_record rec;

                        ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
                        if (ecode != TDB_SUCCESS) {
                                tdb->ecode = ecode;
                                tdb_unlock_hashes(tdb,
                                                  hl_start, hl_range, F_RDLCK);
                                return -1;
                        }
                        if (rec_magic(&rec) != TDB_USED_MAGIC) {
                                tdb_logerr(tdb, TDB_ERR_CORRUPT,
                                           TDB_LOG_ERROR,
                                           "next_in_hash:"
                                           " corrupt record at %llu",
                                           (long long)off);
                                return -1;
                        }

                        kbuf->dsize = rec_key_length(&rec);

                        /* They want data as well? */
                        if (dlen) {
                                *dlen = rec_data_length(&rec);
                                kbuf->dptr = tdb_alloc_read(tdb,
                                                            off + sizeof(rec),
                                                            kbuf->dsize
                                                            + *dlen);
                        } else {
                                kbuf->dptr = tdb_alloc_read(tdb,
                                                            off + sizeof(rec),
                                                            kbuf->dsize);
                        }
                        tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK);
                        if (TDB_PTR_IS_ERR(kbuf->dptr)) {
                                tdb->ecode = TDB_PTR_ERR(kbuf->dptr);
                                return -1;
                        }
                        return 1;
                }

                tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK);

                tinfo->toplevel_group++;
                tinfo->levels[0].hashtable
                        += (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
                tinfo->levels[0].entry = 0;
        }
        return 0;
}

/* Return 1 if we find something, 0 if not, -1 on error. */
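/* Sketch of the traversal loop these two functions are built for (the caller
 * owns and frees kbuf.dptr after each hit):
 *
 *     for (ret = first_in_hash(tdb, &tinfo, &kbuf, &dlen);
 *          ret == 1;
 *          ret = next_in_hash(tdb, &tinfo, &kbuf, &dlen)) {
 *             ... use kbuf (the key; data length is in dlen) ...
 *             free(kbuf.dptr);
 *     }
 */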
int first_in_hash(struct tdb_context *tdb,
                  struct traverse_info *tinfo,
                  TDB_DATA *kbuf, size_t *dlen)
{
        tinfo->prev = 0;
        tinfo->toplevel_group = 0;
        tinfo->num_levels = 1;
        tinfo->levels[0].hashtable = offsetof(struct tdb_header, hashtable);
        tinfo->levels[0].entry = 0;
        tinfo->levels[0].total_buckets = (1 << TDB_HASH_GROUP_BITS);

        return next_in_hash(tdb, tinfo, kbuf, dlen);
}

/* Even if the entry isn't in this hash bucket, you'd have to lock this
 * bucket to find it. */
static int chainlock(struct tdb_context *tdb, const TDB_DATA *key,
                     int ltype, enum tdb_lock_flags waitflag,
                     const char *func)
{
        enum TDB_ERROR ecode;
        uint64_t h = tdb_hash(tdb, key->dptr, key->dsize);
        tdb_off_t lockstart, locksize;
        unsigned int group, gbits;

        gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
        group = bits_from(h, 64 - gbits, gbits);

        lockstart = hlock_range(group, &locksize);

        ecode = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag);
        tdb_trace_1rec(tdb, func, *key);
        if (ecode != TDB_SUCCESS) {
                tdb->ecode = ecode;
                return -1;
        }
        return 0;
}

/* lock/unlock one hash chain. This is meant to be used to reduce
   contention - it cannot guarantee how many records will be locked */
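/* Typical use (sketch):
 *
 *     tdb_chainlock(tdb, key);
 *     ... fetch and/or store the record for key ...
 *     tdb_chainunlock(tdb, key);
 */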
int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
{
        return chainlock(tdb, &key, F_WRLCK, TDB_LOCK_WAIT, "tdb_chainlock");
}

int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
{
        uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
        tdb_off_t lockstart, locksize;
        unsigned int group, gbits;
        enum TDB_ERROR ecode;

        gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
        group = bits_from(h, 64 - gbits, gbits);

        lockstart = hlock_range(group, &locksize);

        tdb_trace_1rec(tdb, "tdb_chainunlock", key);
        ecode = tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK);
        if (ecode != TDB_SUCCESS) {
                tdb->ecode = ecode;
                return -1;
        }
        return 0;
}