/*
   Trivial Database 2: hash handling
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <assert.h>
#include <ccan/hash/hash.h>

static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
                             void *arg)
{
        uint64_t ret;
        /* hash64_stable assumes lower bits are more important; they are a
         * slightly better hash.  We use the upper bits first, so swap them. */
        ret = hash64_stable((const unsigned char *)key, length, seed);
        return (ret >> 32) | (ret << 32);
}
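
/*
 * Added illustration (not part of the original): if hash64_stable() returned
 * 0x1111111122222222ULL, the swap above yields 0x2222222211111111ULL, so
 * code that consumes bits from the top (see use_bits() below) sees the
 * stronger low bits first.
 */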

void tdb_hash_init(struct tdb_context *tdb)
{
        tdb->khash = jenkins_hash;
        tdb->hash_priv = NULL;
}

uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
{
        return tdb->khash(ptr, len, tdb->hash_seed, tdb->hash_priv);
}

uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
{
        struct tdb_used_record pad, *r;
        const void *key;
        uint64_t klen, hash;

        r = tdb_get(tdb, off, &pad, sizeof(pad));
        if (!r)
                /* FIXME */
                return 0;

        klen = rec_key_length(r);
        key = tdb_access_read(tdb, off + sizeof(pad), klen, false);
        if (!key)
                return 0;

        hash = tdb_hash(tdb, key, klen);
        tdb_access_release(tdb, key);
        return hash;
}

/* Get bits from a value. */
static uint32_t bits(uint64_t val, unsigned start, unsigned num)
{
        assert(num <= 32);
        /* Use a 64-bit constant for the mask so that num == 32 (permitted by
         * the assert) doesn't overflow the shift. */
        return (val >> start) & (((uint64_t)1 << num) - 1);
}
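
/*
 * Added example: bits(0xABCD000000000000ULL, 60, 4) == 0xA; "start" counts
 * from bit 0, and "num" bits are extracted upwards from there.
 */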

/* We take bits from the top: that way we can lock whole sections of the hash
 * by using lock ranges. */
static uint32_t use_bits(struct hash_info *h, unsigned num)
{
        h->hash_used += num;
        return bits(h->h, 64 - h->hash_used, num);
}

/* Does entry match? */
static bool match(struct tdb_context *tdb,
                  struct hash_info *h,
                  const struct tdb_data *key,
                  tdb_off_t val,
                  struct tdb_used_record *rec)
{
        bool ret;
        const unsigned char *rkey;
        tdb_off_t off;

        /* FIXME: Handle hash value truncated. */
        if (bits(val, TDB_OFF_HASH_TRUNCATED_BIT, 1))
                abort();

        /* Desired bucket must match. */
        if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK))
                return false;

        /* Top bits of offset == next bits of hash. */
        if (bits(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
            != bits(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
                    TDB_OFF_UPPER_STEAL_EXTRA))
                return false;

        off = val & TDB_OFF_MASK;
        if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
                return false;

        /* FIXME: check extra bits in header? */
        if (rec_key_length(rec) != key->dsize)
                return false;

        rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
        if (!rkey)
                return false;
        ret = (memcmp(rkey, key->dptr, key->dsize) == 0);
        tdb_access_release(tdb, rkey);
        return ret;
}

static tdb_off_t hbucket_off(tdb_off_t group_start, unsigned bucket)
{
        return group_start
                + (bucket % (1 << TDB_HASH_GROUP_BITS)) * sizeof(tdb_off_t);
}
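
/*
 * Added example (hypothetical numbers): if TDB_HASH_GROUP_BITS were 2, so
 * groups of four buckets, hbucket_off(g, 5) == g + 1 * sizeof(tdb_off_t),
 * since bucket numbers wrap within the group.
 */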

/* Truncated hashes can't be all 1: that's how we spot a sub-hash. */
bool is_subhash(tdb_off_t val)
{
        return val >> (64 - TDB_OFF_UPPER_STEAL) == (1 << TDB_OFF_UPPER_STEAL) - 1;
}
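
/*
 * Added note: expand_group() below creates exactly this pattern by OR-ing
 * ~((1ULL << (64 - TDB_OFF_UPPER_STEAL)) - 1) into the subhash offset, i.e.
 * setting all of the top "stolen" bits.
 */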

/* This is the core routine which searches the hashtable for an entry.
 * On error, no locks are held and TDB_OFF_ERR is returned.
 * Otherwise, hinfo is filled in.
 * If not found, the return value is 0.
 * If found, the return value is the offset, and *rec is the record. */
tdb_off_t find_and_lock(struct tdb_context *tdb,
                        struct tdb_data key,
                        int ltype,
                        struct hash_info *h,
                        struct tdb_used_record *rec)
{
        uint32_t i, group;
        tdb_off_t hashtable;

        h->h = tdb_hash(tdb, key.dptr, key.dsize);
        h->hash_used = 0;
        group = use_bits(h, TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
        h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);

        /* FIXME: Guess the depth, don't over-lock! */
        h->hlock_start = (tdb_off_t)group
                << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
        h->hlock_range = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS
                                        - TDB_HASH_GROUP_BITS));
        if (tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
                            TDB_LOCK_WAIT))
                return TDB_OFF_ERR;

        hashtable = offsetof(struct tdb_header, hashtable);

        while (likely(h->hash_used < 64)) {
                /* Read in the hash group. */
                h->group_start = hashtable
                        + group * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);

                if (tdb_read_convert(tdb, h->group_start, &h->group,
                                     sizeof(h->group)) == -1)
                        goto fail;

                /* Pointer to another hash table?  Go down... */
                if (is_subhash(h->group[h->home_bucket])) {
                        hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
                                + sizeof(struct tdb_used_record);
                        group = use_bits(h, TDB_SUBLEVEL_HASH_BITS
                                         - TDB_HASH_GROUP_BITS);
                        h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
                        continue;
                }

                /* It's in this group: search (until 0 or all searched). */
                for (i = 0, h->found_bucket = h->home_bucket;
                     i < (1 << TDB_HASH_GROUP_BITS);
                     i++, h->found_bucket = ((h->found_bucket+1)
                                             % (1 << TDB_HASH_GROUP_BITS))) {
                        if (is_subhash(h->group[h->found_bucket]))
                                continue;

                        if (!h->group[h->found_bucket])
                                break;

                        if (match(tdb, h, &key, h->group[h->found_bucket], rec))
                                return h->group[h->found_bucket] & TDB_OFF_MASK;
                }
                /* Didn't find it: h indicates where it would go. */
                return 0;
        }

        /* FIXME: We hit the bottom.  Chain! */
        abort();

fail:
        tdb_unlock_hashes(tdb, h->hlock_start, h->hlock_range, ltype);
        return TDB_OFF_ERR;
}
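
/*
 * Typical caller pattern (an added sketch, not taken from the original file;
 * F_RDLCK is the usual read ltype):
 *
 *      struct hash_info h;
 *      struct tdb_used_record rec;
 *      tdb_off_t off = find_and_lock(tdb, key, F_RDLCK, &h, &rec);
 *
 *      if (off == TDB_OFF_ERR)
 *              return -1;      // error path: no locks held
 *      // off != 0: record found at off; off == 0: h says where it would go.
 *      tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_RDLCK);
 */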

/* I wrote a simple test, expanding a hash to 2GB, for the following
 * cases:
 * 1) Expanding all the buckets at once,
 * 2) Expanding the most-populated bucket,
 * 3) Expanding the bucket we wanted to place the new entry into.
 *
 * I measured the worst/average/best density during this process.
 * 1) 3%/16%/30%
 * 2) 4%/20%/38%
 * 3) 6%/22%/41%
 *
 * So we figure out the busiest bucket for the moment.
 */
static unsigned fullest_bucket(struct tdb_context *tdb,
                               const tdb_off_t *group,
                               unsigned new_bucket)
{
        unsigned counts[1 << TDB_HASH_GROUP_BITS] = { 0 };
        unsigned int i, best_bucket;

        /* Count the new entry. */
        counts[new_bucket]++;
        best_bucket = new_bucket;

        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned this_bucket;

                if (is_subhash(group[i]))
                        continue;
                this_bucket = group[i] & TDB_OFF_HASH_GROUP_MASK;
                if (++counts[this_bucket] > counts[best_bucket])
                        best_bucket = this_bucket;
        }

        return best_bucket;
}

static bool put_into_group(tdb_off_t *group,
                           unsigned bucket, tdb_off_t encoded)
{
        unsigned int i;

        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned b = (bucket + i) % (1 << TDB_HASH_GROUP_BITS);

                if (group[b] == 0) {
                        group[b] = encoded;
                        return true;
                }
        }
        return false;
}

static void force_into_group(tdb_off_t *group,
                             unsigned bucket, tdb_off_t encoded)
{
        if (!put_into_group(group, bucket, encoded))
                abort();
}

static tdb_off_t encode_offset(tdb_off_t new_off, struct hash_info *h)
{
        return h->home_bucket
                | new_off
                | ((uint64_t)bits(h->h,
                                  64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
                                  TDB_OFF_UPPER_STEAL_EXTRA)
                   << TDB_OFF_HASH_EXTRA_BIT);
}
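
/*
 * Added sketch of the encoded entry, derived from the masks used here and in
 * match() (exact widths depend on the TDB_OFF_* constants in private.h):
 *
 *      [ extra hash bits ][ ....... offset ....... ][ home bucket ]
 *        top stolen bits         TDB_OFF_MASK          group bits
 */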

/* Simply overwrite the hash entry we found before. */
int replace_in_hash(struct tdb_context *tdb,
                    struct hash_info *h,
                    tdb_off_t new_off)
{
        return tdb_write_off(tdb, hbucket_off(h->group_start, h->found_bucket),
                             encode_offset(new_off, h));
}

/* Add into a newly created subhash. */
static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
                          unsigned hash_used, tdb_off_t val)
{
        tdb_off_t off = (val & TDB_OFF_MASK), *group;
        struct hash_info h;
        unsigned int gnum;

        h.hash_used = hash_used;

        /* FIXME: chain if hash_used == 64 */
        if (hash_used + TDB_SUBLEVEL_HASH_BITS > 64)
                abort();

        /* FIXME: Do truncated hash bits if we can! */
        h.h = hash_record(tdb, off);
        gnum = use_bits(&h, TDB_SUBLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
        h.group_start = subhash + sizeof(struct tdb_used_record)
                + gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
        h.home_bucket = use_bits(&h, TDB_HASH_GROUP_BITS);

        group = tdb_access_write(tdb, h.group_start,
                                 sizeof(*group) << TDB_HASH_GROUP_BITS, true);
        if (!group)
                return -1;
        force_into_group(group, h.home_bucket, encode_offset(off, &h));
        return tdb_access_commit(tdb, group);
}

static int expand_group(struct tdb_context *tdb, struct hash_info *h)
{
        unsigned bucket, num_vals, i;
        tdb_off_t subhash;
        tdb_off_t vals[1 << TDB_HASH_GROUP_BITS];

        /* Attach a new empty subhash under the fullest bucket. */
        bucket = fullest_bucket(tdb, h->group, h->home_bucket);

        subhash = alloc(tdb, 0, sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS,
                        0, false);
        if (subhash == TDB_OFF_ERR)
                return -1;

        if (zero_out(tdb, subhash + sizeof(struct tdb_used_record),
                     sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS) == -1)
                return -1;

        /* Remove any which are destined for the bucket, or are in the wrong
         * place. */
        num_vals = 0;
        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned home_bucket = h->group[i] & TDB_OFF_HASH_GROUP_MASK;
                if (!h->group[i] || is_subhash(h->group[i]))
                        continue;
                if (home_bucket == bucket || home_bucket != i) {
                        vals[num_vals++] = h->group[i];
                        h->group[i] = 0;
                }
        }
        /* FIXME: This assert is valid, but we do this during unit test :( */
        /* assert(num_vals); */

        /* Overwrite the expanded bucket with the subhash pointer. */
        h->group[bucket] = subhash | ~((1ULL << (64 - TDB_OFF_UPPER_STEAL)) - 1);

        /* Put values back. */
        for (i = 0; i < num_vals; i++) {
                unsigned this_bucket = vals[i] & TDB_OFF_HASH_GROUP_MASK;

                if (this_bucket == bucket) {
                        if (add_to_subhash(tdb, subhash, h->hash_used, vals[i]))
                                return -1;
                } else {
                        /* There should be room to put this back. */
                        force_into_group(h->group, this_bucket, vals[i]);
                }
        }
        return 0;
}

int delete_from_hash(struct tdb_context *tdb, struct hash_info *h)
{
        unsigned int i, num_movers = 0;
        tdb_off_t movers[1 << TDB_HASH_GROUP_BITS];

        h->group[h->found_bucket] = 0;
        for (i = 1; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned this_bucket;

                this_bucket = (h->found_bucket+i) % (1 << TDB_HASH_GROUP_BITS);
                /* Empty bucket?  We're done. */
                if (!h->group[this_bucket])
                        break;

                /* Ignore subhashes. */
                if (is_subhash(h->group[this_bucket]))
                        continue;

                /* If this one is not happy where it is, we'll move it. */
                if ((h->group[this_bucket] & TDB_OFF_HASH_GROUP_MASK)
                    != this_bucket) {
                        movers[num_movers++] = h->group[this_bucket];
                        h->group[this_bucket] = 0;
                }
        }

        /* Put back the ones we erased. */
        for (i = 0; i < num_movers; i++) {
                force_into_group(h->group, movers[i] & TDB_OFF_HASH_GROUP_MASK,
                                 movers[i]);
        }

        /* Now we write back the hash group. */
        return tdb_write_convert(tdb, h->group_start,
                                 h->group, sizeof(h->group));
}

int add_to_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off)
{
        /* FIXME: chain! */
        if (h->hash_used >= 64)
                abort();

        /* We hit an empty bucket during search?  That's where it goes. */
        if (!h->group[h->found_bucket]) {
                h->group[h->found_bucket] = encode_offset(new_off, h);
                /* Write back the modified group. */
                return tdb_write_convert(tdb, h->group_start,
                                         h->group, sizeof(h->group));
        }

        /* We're full.  Expand. */
        if (expand_group(tdb, h) == -1)
                return -1;

        if (is_subhash(h->group[h->home_bucket])) {
                /* We were expanded! */
                tdb_off_t hashtable;
                unsigned int gnum;

                /* Write back the modified group. */
                if (tdb_write_convert(tdb, h->group_start, h->group,
                                      sizeof(h->group)))
                        return -1;

                /* Move hashinfo down a level. */
                hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
                        + sizeof(struct tdb_used_record);
                gnum = use_bits(h, TDB_SUBLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
                h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
                h->group_start = hashtable
                        + gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
                if (tdb_read_convert(tdb, h->group_start, &h->group,
                                     sizeof(h->group)) == -1)
                        return -1;
        }

        /* Expanding the group must have made room if it didn't choose this
         * bucket. */
        if (put_into_group(h->group, h->home_bucket, encode_offset(new_off, h)))
                return tdb_write_convert(tdb, h->group_start,
                                         h->group, sizeof(h->group));

        /* This can happen if all the hashes in the group (and ours) dropped
         * into the same group in the subhash. */
        return add_to_hash(tdb, h, new_off);
}
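
/*
 * Added note: the tail recursion above terminates because each descent into
 * a subhash consumes TDB_SUBLEVEL_HASH_BITS more of the hash via use_bits(),
 * and the hash_used >= 64 check at the top aborts (FIXME: chain) before the
 * bits run out.
 */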

/* No point holding references/copies of db once we drop lock. */
static void release_entries(struct tdb_context *tdb,
                            struct traverse_info *tinfo)
{
        unsigned int i;

        for (i = 0; i < tinfo->num_levels; i++) {
                if (tinfo->levels[i].entries) {
                        tdb_access_release(tdb, tinfo->levels[i].entries);
                        tinfo->levels[i].entries = NULL;
                }
        }
}

/* Traverse support: returns offset of record, or 0 or TDB_OFF_ERR. */
static tdb_off_t iterate_hash(struct tdb_context *tdb,
                              struct traverse_info *tinfo)
{
        tdb_off_t off;
        unsigned int i;
        struct traverse_level *tlevel;

        tlevel = &tinfo->levels[tinfo->num_levels-1];

again:
        if (!tlevel->entries) {
                tlevel->entries = tdb_access_read(tdb, tlevel->hashtable,
                                                  sizeof(tdb_off_t)
                                                  * tlevel->total_buckets,
                                                  true);
                if (!tlevel->entries)
                        return TDB_OFF_ERR;
        }

        /* FIXME: Use tdb_find_nonzero_off? */
        for (i = tlevel->entry; i < tlevel->total_buckets; i++) {
                if (!tlevel->entries[i] || tlevel->entries[i] == tinfo->prev)
                        continue;

                tlevel->entry = i;
                off = tlevel->entries[i] & TDB_OFF_MASK;

                if (!is_subhash(tlevel->entries[i])) {
                        /* Found one. */
                        tinfo->prev = tlevel->entries[i];
                        release_entries(tdb, tinfo);
                        return off;
                }

                /* When we come back, we want the next one. */
                tlevel->entry++;
                tinfo->num_levels++;
                tlevel++;
                tlevel->hashtable = off + sizeof(struct tdb_used_record);
                tlevel->entry = 0;
                tlevel->entries = NULL;
                tlevel->total_buckets = (1 << TDB_SUBLEVEL_HASH_BITS);
                goto again;
        }

        /* Nothing there? */
        if (tinfo->num_levels == 1) {
                release_entries(tdb, tinfo);
                return 0;
        }

        /* Go back up and keep searching. */
        tdb_access_release(tdb, tlevel->entries);
        tinfo->num_levels--;
        tlevel--;
        goto again;
}

/* Return 1 if we find something, 0 if not, -1 on error. */
int next_in_hash(struct tdb_context *tdb, int ltype,
                 struct traverse_info *tinfo,
                 TDB_DATA *kbuf, unsigned int *dlen)
{
        const unsigned group_bits = TDB_TOPLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS;
        tdb_off_t hlock_start, hlock_range, off;

        while (tinfo->toplevel_group < (1 << group_bits)) {
                hlock_start = (tdb_off_t)tinfo->toplevel_group
                        << (64 - group_bits);
                hlock_range = 1ULL << group_bits;
                if (tdb_lock_hashes(tdb, hlock_start, hlock_range, ltype,
                                    TDB_LOCK_WAIT) != 0)
                        return -1;

                off = iterate_hash(tdb, tinfo);
                if (off) {
                        struct tdb_used_record rec;

                        if (tdb_read_convert(tdb, off, &rec, sizeof(rec))) {
                                tdb_unlock_hashes(tdb,
                                                  hlock_start, hlock_range,
                                                  ltype);
                                return -1;
                        }
                        kbuf->dsize = rec_key_length(&rec);

                        /* They want the data as well? */
                        if (dlen) {
                                *dlen = rec_data_length(&rec);
                                kbuf->dptr = tdb_alloc_read(tdb,
                                                            off + sizeof(rec),
                                                            kbuf->dsize
                                                            + *dlen);
                        } else {
                                kbuf->dptr = tdb_alloc_read(tdb,
                                                            off + sizeof(rec),
                                                            kbuf->dsize);
                        }
                        tdb_unlock_hashes(tdb, hlock_start, hlock_range, ltype);
                        return kbuf->dptr ? 1 : -1;
                }

                tdb_unlock_hashes(tdb, hlock_start, hlock_range, ltype);

                tinfo->toplevel_group++;
                tinfo->levels[0].hashtable
                        += (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
                tinfo->levels[0].entry = 0;
        }
        return 0;
}

/* Return 1 if we find something, 0 if not, -1 on error. */
int first_in_hash(struct tdb_context *tdb, int ltype,
                  struct traverse_info *tinfo,
                  TDB_DATA *kbuf, unsigned int *dlen)
{
        tinfo->prev = 0;
        tinfo->toplevel_group = 0;
        tinfo->num_levels = 1;
        tinfo->levels[0].hashtable = offsetof(struct tdb_header, hashtable);
        tinfo->levels[0].entries = NULL;
        tinfo->levels[0].entry = 0;
        tinfo->levels[0].total_buckets = (1 << TDB_HASH_GROUP_BITS);

        return next_in_hash(tdb, ltype, tinfo, kbuf, dlen);
}
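
/*
 * Added traversal sketch (illustrative, not part of the original file;
 * assumes the usual tdb convention that the caller frees kbuf->dptr):
 *
 *      struct traverse_info tinfo;
 *      TDB_DATA key;
 *      int ret = first_in_hash(tdb, F_RDLCK, &tinfo, &key, NULL);
 *
 *      while (ret == 1) {
 *              ... use key ...
 *              free(key.dptr);
 *              ret = next_in_hash(tdb, F_RDLCK, &tinfo, &key, NULL);
 *      }
 *      // ret == 0: traversal complete; ret == -1: error.
 */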