/*
   Trivial Database 2: hash handling
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <assert.h>
#include <ccan/hash/hash.h>
static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
			     void *arg)
{
	uint64_t ret;
	/* hash64_stable assumes lower bits are more important; they are a
	 * slightly better hash.  We use the upper bits first, so swap them. */
	ret = hash64_stable((const unsigned char *)key, length, seed);
	return (ret >> 32) | (ret << 32);
}
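/* Illustrative sketch of the swap (value purely hypothetical): if
 * hash64_stable() returned 0x1111222233334444ULL, then
 *
 *	(ret >> 32) | (ret << 32) == 0x3333444411112222ULL
 *
 * so the better-mixed low word becomes the top word, which use_bits()
 * below consumes first. */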
void tdb_hash_init(struct tdb_context *tdb)
{
	tdb->khash = jenkins_hash;
	tdb->hash_priv = NULL;
}
uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
{
	return tdb->khash(ptr, len, tdb->hash_seed, tdb->hash_priv);
}
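/* A minimal sketch of a replacement hash callback, assuming the caller
 * overrides tdb->khash after tdb_hash_init() (the function below is
 * hypothetical; it does an FNV-1a-style step per byte):
 *
 *	static uint64_t example_hash(const void *key, size_t len,
 *				     uint64_t seed, void *priv)
 *	{
 *		uint64_t h = seed;
 *		const unsigned char *p = key;
 *		while (len--)
 *			h = (h ^ *p++) * 0x100000001b3ULL;
 *		return h;
 *	}
 *
 *	tdb->khash = example_hash;
 *	tdb->hash_priv = NULL;
 */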
uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
{
	struct tdb_used_record pad, *r;
	const void *key;
	uint64_t klen, hash;

	r = tdb_get(tdb, off, &pad, sizeof(pad));
	if (!r)
		/* FIXME: surface the read error to the caller. */
		return 0;

	klen = rec_key_length(r);
	key = tdb_access_read(tdb, off + sizeof(pad), klen, false);
	if (!key)
		return 0;

	hash = tdb_hash(tdb, key, klen);
	tdb_access_release(tdb, key);
	return hash;
}
/* Get bits from a value. */
static uint32_t bits(uint64_t val, unsigned start, unsigned num)
{
	assert(num <= 32);
	return (val >> start) & ((1U << num) - 1);
}
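/* For example, bits(0xABCD, 8, 4) == 0xB: shift the value right by
 * `start`, then mask off everything above the bottom `num` bits. */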
/* We take bits from the top: that way we can lock whole sections of the hash
 * by using lock ranges. */
static uint32_t use_bits(struct hash_info *h, unsigned num)
{
	h->hash_used += num;
	return bits(h->h, 64 - h->hash_used, num);
}
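/* Illustrative trace (hash value hypothetical): with h->h ==
 * 0xFEDCBA9876543210ULL and h->hash_used == 0,
 *
 *	use_bits(h, 4) == 0xF;	// consumes bits 63..60
 *	use_bits(h, 4) == 0xE;	// consumes bits 59..56
 *
 * Each call advances h->hash_used, so successive calls walk down from the
 * most significant bits. */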
/* Does entry match? */
static bool match(struct tdb_context *tdb,
		  struct hash_info *h,
		  const struct tdb_data *key,
		  tdb_off_t val,
		  struct tdb_used_record *rec)
{
	bool ret;
	const unsigned char *rkey;
	tdb_off_t off;

	/* FIXME: Handle hash value truncated. */
	if (bits(val, TDB_OFF_HASH_TRUNCATED_BIT, 1))
		abort();

	/* Desired bucket must match. */
	if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK))
		return false;

	/* Top bits of offset == next bits of hash. */
	if (bits(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
	    != bits(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
		    TDB_OFF_UPPER_STEAL_EXTRA))
		return false;

	off = val & TDB_OFF_MASK;
	if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
		return false;

	/* FIXME: check extra bits in header? */
	if (rec_key_length(rec) != key->dsize)
		return false;

	rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
	if (!rkey)
		return false;
	ret = (memcmp(rkey, key->dptr, key->dsize) == 0);
	tdb_access_release(tdb, rkey);
	return ret;
}
static tdb_off_t hbucket_off(tdb_off_t group_start, unsigned bucket)
{
	return group_start
		+ (bucket % (1 << TDB_HASH_GROUP_BITS)) * sizeof(tdb_off_t);
}
/* Truncated hashes can't be all 1: that's how we spot a sub-hash. */
bool is_subhash(tdb_off_t val)
{
	return val >> (64-TDB_OFF_UPPER_STEAL) == (1<<TDB_OFF_UPPER_STEAL) - 1;
}
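/* expand_group() below creates such a value by ORing the subhash offset
 * with all-ones in the stolen top bits; e.g. with TDB_OFF_UPPER_STEAL == 4
 * (an illustrative value, see the header for the real one):
 *
 *	val = subhash_off | 0xF000000000000000ULL;
 *
 * Since no truncated hash can be all ones, the test above is unambiguous. */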
/* This is the core routine which searches the hashtable for an entry.
 * On error, no locks are held and TDB_OFF_ERR is returned.
 * Otherwise, hinfo is filled in.
 * If not found, the return value is 0.
 * If found, the return value is the offset, and *rec is the record. */
tdb_off_t find_and_lock(struct tdb_context *tdb,
			struct tdb_data key,
			int ltype,
			struct hash_info *h,
			struct tdb_used_record *rec)
{
	uint32_t i, group;
	tdb_off_t hashtable;

	h->h = tdb_hash(tdb, key.dptr, key.dsize);
	h->hash_used = 0;
	group = use_bits(h, TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
	h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);

	/* FIXME: Guess the depth, don't over-lock! */
	h->hlock_start = (tdb_off_t)group
		<< (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
	h->hlock_range = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS
					- TDB_HASH_GROUP_BITS));
	if (tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
			    TDB_LOCK_WAIT))
		return TDB_OFF_ERR;

	hashtable = offsetof(struct tdb_header, hashtable);

	while (likely(h->hash_used < 64)) {
		/* Read in the hash group. */
		h->group_start = hashtable
			+ group * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);

		if (tdb_read_convert(tdb, h->group_start, &h->group,
				     sizeof(h->group)) == -1)
			goto fail;

		/* Pointer to another hash table?  Go down... */
		if (is_subhash(h->group[h->home_bucket])) {
			hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
				+ sizeof(struct tdb_used_record);
			group = use_bits(h, TDB_SUBLEVEL_HASH_BITS
					 - TDB_HASH_GROUP_BITS);
			h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
			continue;
		}

		/* It's in this group: search (until 0 or all searched) */
		for (i = 0, h->found_bucket = h->home_bucket;
		     i < (1 << TDB_HASH_GROUP_BITS);
		     i++, h->found_bucket = ((h->found_bucket+1)
					     % (1 << TDB_HASH_GROUP_BITS))) {
			if (is_subhash(h->group[h->found_bucket]))
				continue;

			if (!h->group[h->found_bucket])
				break;

			if (match(tdb, h, &key, h->group[h->found_bucket], rec))
				return h->group[h->found_bucket] & TDB_OFF_MASK;
		}
		/* Didn't find it: h indicates where it would go. */
		return 0;
	}

	/* FIXME: We hit the bottom.  Chain! */
	abort();

fail:
	tdb_unlock_hashes(tdb, h->hlock_start, h->hlock_range, ltype);
	return TDB_OFF_ERR;
}
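/* A minimal caller sketch (illustrative, error handling elided): on
 * success the hash range stays locked, so the caller must unlock it.
 *
 *	struct hash_info h;
 *	struct tdb_used_record rec;
 *	tdb_off_t off = find_and_lock(tdb, key, F_RDLCK, &h, &rec);
 *	if (off == TDB_OFF_ERR)
 *		return -1;			// no locks held on error
 *	if (off)
 *		...				// found: off is the record
 *	tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_RDLCK);
 */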
/* I wrote a simple test, expanding a hash to 2GB, for the following
 * cases:
 * 1) Expanding all the buckets at once,
 * 2) Expanding the most-populated bucket,
 * 3) Expanding the bucket we wanted to place the new entry into.
 *
 * I measured the worst/average/best density during this process;
 * expanding the most-populated bucket did at least as well as the
 * alternatives.
 *
 * So we figure out the busiest bucket for the moment.
 */
static unsigned fullest_bucket(struct tdb_context *tdb,
			       const tdb_off_t *group,
			       unsigned new_bucket)
{
	unsigned counts[1 << TDB_HASH_GROUP_BITS] = { 0 };
	unsigned int i, best_bucket;

	/* Count the new entry. */
	counts[new_bucket]++;
	best_bucket = new_bucket;

	for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
		unsigned this_bucket;

		if (!group[i] || is_subhash(group[i]))
			continue;
		this_bucket = group[i] & TDB_OFF_HASH_GROUP_MASK;
		if (++counts[this_bucket] > counts[best_bucket])
			best_bucket = this_bucket;
	}

	return best_bucket;
}
static bool put_into_group(tdb_off_t *group,
			   unsigned bucket, tdb_off_t encoded)
{
	unsigned int i;

	for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
		unsigned b = (bucket + i) % (1 << TDB_HASH_GROUP_BITS);

		if (group[b] == 0) {
			group[b] = encoded;
			return true;
		}
	}
	return false;
}
static void force_into_group(tdb_off_t *group,
			     unsigned bucket, tdb_off_t encoded)
{
	/* The caller has guaranteed there is room. */
	if (!put_into_group(group, bucket, encoded))
		abort();
}
static tdb_off_t encode_offset(tdb_off_t new_off, struct hash_info *h)
{
	return h->home_bucket
		| new_off
		| ((uint64_t)bits(h->h,
				  64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
				  TDB_OFF_UPPER_STEAL_EXTRA)
		   << TDB_OFF_HASH_EXTRA_BIT);
}
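/* So an encoded entry packs three things into one tdb_off_t: the record
 * offset itself (its bottom and top bits are assumed free, since records
 * are aligned and files are far smaller than 2^64), the home bucket in the
 * bottom TDB_HASH_GROUP_BITS, and the next TDB_OFF_UPPER_STEAL_EXTRA hash
 * bits beyond those already used.  match() checks the bucket and extra
 * hash bits before reading the record, so most mismatches are cheap. */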
/* Simply overwrite the hash entry we found before. */
int replace_in_hash(struct tdb_context *tdb,
		    struct hash_info *h,
		    tdb_off_t new_off)
{
	return tdb_write_off(tdb, hbucket_off(h->group_start, h->found_bucket),
			     encode_offset(new_off, h));
}
/* Add into a newly created subhash. */
static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
			  unsigned hash_used, tdb_off_t val)
{
	tdb_off_t off = (val & TDB_OFF_MASK), *group;
	struct hash_info h;
	unsigned int gnum;

	h.hash_used = hash_used;

	/* FIXME: chain if hash_used == 64 */
	if (hash_used + TDB_SUBLEVEL_HASH_BITS > 64)
		abort();

	/* FIXME: Do truncated hash bits if we can! */
	h.h = hash_record(tdb, off);
	gnum = use_bits(&h, TDB_SUBLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS);
	h.group_start = subhash + sizeof(struct tdb_used_record)
		+ gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
	h.home_bucket = use_bits(&h, TDB_HASH_GROUP_BITS);

	group = tdb_access_write(tdb, h.group_start,
				 sizeof(*group) << TDB_HASH_GROUP_BITS, true);
	if (!group)
		return -1;
	force_into_group(group, h.home_bucket, encode_offset(off, &h));
	return tdb_access_commit(tdb, group);
}
static int expand_group(struct tdb_context *tdb, struct hash_info *h)
{
	unsigned bucket, num_vals, i;
	tdb_off_t subhash;
	tdb_off_t vals[1 << TDB_HASH_GROUP_BITS];

	/* Attach new empty subhash under fullest bucket. */
	bucket = fullest_bucket(tdb, h->group, h->home_bucket);

	subhash = alloc(tdb, 0, sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS,
			0, false);
	if (subhash == TDB_OFF_ERR)
		return -1;

	if (zero_out(tdb, subhash + sizeof(struct tdb_used_record),
		     sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS) == -1)
		return -1;

	/* Remove any which are destined for bucket or are in wrong place. */
	num_vals = 0;
	for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
		unsigned home_bucket = h->group[i] & TDB_OFF_HASH_GROUP_MASK;
		if (!h->group[i] || is_subhash(h->group[i]))
			continue;
		if (home_bucket == bucket || home_bucket != i) {
			vals[num_vals++] = h->group[i];
			h->group[i] = 0;
		}
	}
	/* FIXME: This assert is valid, but we do this during unit test :( */
	/* assert(num_vals); */

	/* Overwrite expanded bucket with subhash pointer. */
	h->group[bucket] = subhash | ~((1ULL << (64 - TDB_OFF_UPPER_STEAL))-1);

	/* Put values back. */
	for (i = 0; i < num_vals; i++) {
		unsigned this_bucket = vals[i] & TDB_OFF_HASH_GROUP_MASK;

		if (this_bucket == bucket) {
			if (add_to_subhash(tdb, subhash, h->hash_used,
					   vals[i]))
				return -1;
		} else {
			/* There should be room to put this back. */
			force_into_group(h->group, this_bucket, vals[i]);
		}
	}
	return 0;
}
int delete_from_hash(struct tdb_context *tdb, struct hash_info *h)
{
	unsigned int i, num_movers = 0;
	tdb_off_t movers[1 << TDB_HASH_GROUP_BITS];

	h->group[h->found_bucket] = 0;
	for (i = 1; i < (1 << TDB_HASH_GROUP_BITS); i++) {
		unsigned this_bucket;

		this_bucket = (h->found_bucket+i) % (1 << TDB_HASH_GROUP_BITS);
		/* Empty bucket?  We're done. */
		if (!h->group[this_bucket])
			break;

		/* Ignore subhashes. */
		if (is_subhash(h->group[this_bucket]))
			continue;

		/* If this one is not happy where it is, we'll move it. */
		if ((h->group[this_bucket] & TDB_OFF_HASH_GROUP_MASK)
		    != this_bucket) {
			movers[num_movers++] = h->group[this_bucket];
			h->group[this_bucket] = 0;
		}
	}

	/* Put back the ones we erased. */
	for (i = 0; i < num_movers; i++) {
		force_into_group(h->group, movers[i] & TDB_OFF_HASH_GROUP_MASK,
				 movers[i]);
	}

	/* Now we write back the hash group. */
	return tdb_write_convert(tdb, h->group_start,
				 h->group, sizeof(h->group));
}
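/* Illustrative trace, assuming a 4-bucket group: entries [A(home 0),
 * B(home 0), C(home 2), empty], and we delete A from bucket 0.  The scan
 * erases B (home 0 != bucket 1) and stops at the empty bucket; C stays,
 * since it is already home.  force_into_group() then re-places B, which
 * lands in the now-free bucket 0.  This preserves the invariant that a
 * linear probe may stop at the first zero bucket. */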
int add_to_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off)
{
	/* FIXME: We hit the bottom.  Chain! */
	if (h->hash_used >= 64)
		abort();

	/* We hit an empty bucket during search?  That's where it goes. */
	if (!h->group[h->found_bucket]) {
		h->group[h->found_bucket] = encode_offset(new_off, h);
		/* Write back the modified group. */
		return tdb_write_convert(tdb, h->group_start,
					 h->group, sizeof(h->group));
	}

	/* We're full.  Expand. */
	if (expand_group(tdb, h) == -1)
		return -1;

	if (is_subhash(h->group[h->home_bucket])) {
		/* We were expanded! */
		tdb_off_t hashtable;
		unsigned int gnum;

		/* Write back the modified group. */
		if (tdb_write_convert(tdb, h->group_start, h->group,
				      sizeof(h->group)))
			return -1;

		/* Move hashinfo down a level. */
		hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
			+ sizeof(struct tdb_used_record);
		gnum = use_bits(h, TDB_SUBLEVEL_HASH_BITS
				- TDB_HASH_GROUP_BITS);
		h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
		h->group_start = hashtable
			+ gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
		if (tdb_read_convert(tdb, h->group_start, &h->group,
				     sizeof(h->group)) == -1)
			return -1;
	}

	/* Expanding the group must have made room if it didn't choose this
	 * bucket. */
	if (put_into_group(h->group, h->home_bucket, encode_offset(new_off, h)))
		return tdb_write_convert(tdb, h->group_start,
					 h->group, sizeof(h->group));

	/* This can happen if all hashes in group (and us) dropped into same
	 * group in subhash. */
	return add_to_hash(tdb, h, new_off);
}
/* Traverse support: returns offset of record, or 0 or TDB_OFF_ERR. */
static tdb_off_t iterate_hash(struct tdb_context *tdb,
			      struct traverse_info *tinfo)
{
	tdb_off_t off, val;
	unsigned int i;
	struct traverse_level *tlevel;

	tlevel = &tinfo->levels[tinfo->num_levels-1];

again:
	for (i = tdb_find_nonzero_off(tdb, tlevel->hashtable,
				      tlevel->entry, tlevel->total_buckets);
	     i != tlevel->total_buckets;
	     i = tdb_find_nonzero_off(tdb, tlevel->hashtable,
				      i+1, tlevel->total_buckets)) {
		val = tdb_read_off(tdb, tlevel->hashtable+sizeof(tdb_off_t)*i);
		if (unlikely(val == TDB_OFF_ERR))
			return TDB_OFF_ERR;

		/* This makes the delete-all-in-traverse case work
		 * (and simplifies our logic a little). */
		if (val == tinfo->prev)
			continue;

		tlevel->entry = i;
		off = val & TDB_OFF_MASK;

		if (!is_subhash(val)) {
			/* Found one. */
			tinfo->prev = val;
			return off;
		}

		/* When we come back, we want the next one */
		tlevel->entry++;
		tinfo->num_levels++;
		tlevel++;
		tlevel->hashtable = off + sizeof(struct tdb_used_record);
		tlevel->entry = 0;
		tlevel->total_buckets = (1 << TDB_SUBLEVEL_HASH_BITS);
		goto again;
	}

	/* Nothing there?  If this is the top level, we're done. */
	if (tinfo->num_levels == 1)
		return 0;

	/* Go back up and keep searching. */
	tinfo->num_levels--;
	tlevel--;
	goto again;
}
/* Return 1 if we find something, 0 if not, -1 on error. */
int next_in_hash(struct tdb_context *tdb, int ltype,
		 struct traverse_info *tinfo,
		 TDB_DATA *kbuf, unsigned int *dlen)
{
	const unsigned group_bits = TDB_TOPLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS;
	tdb_off_t hlock_start, hlock_range, off;

	while (tinfo->toplevel_group < (1 << group_bits)) {
		hlock_start = (tdb_off_t)tinfo->toplevel_group
			<< (64 - group_bits);
		hlock_range = 1ULL << group_bits;
		if (tdb_lock_hashes(tdb, hlock_start, hlock_range, ltype,
				    TDB_LOCK_WAIT))
			return -1;

		off = iterate_hash(tdb, tinfo);
		if (off) {
			struct tdb_used_record rec;

			if (tdb_read_convert(tdb, off, &rec, sizeof(rec))) {
				tdb_unlock_hashes(tdb,
						  hlock_start, hlock_range,
						  ltype);
				return -1;
			}
			kbuf->dsize = rec_key_length(&rec);

			/* They want data as well? */
			if (dlen) {
				*dlen = rec_data_length(&rec);
				kbuf->dptr = tdb_alloc_read(tdb,
							    off + sizeof(rec),
							    kbuf->dsize
							    + *dlen);
			} else {
				kbuf->dptr = tdb_alloc_read(tdb,
							    off + sizeof(rec),
							    kbuf->dsize);
			}
			tdb_unlock_hashes(tdb, hlock_start, hlock_range, ltype);
			return kbuf->dptr ? 1 : -1;
		}

		tdb_unlock_hashes(tdb, hlock_start, hlock_range, ltype);

		tinfo->toplevel_group++;
		tinfo->levels[0].hashtable
			+= (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
		tinfo->levels[0].entry = 0;
	}
	return 0;
}
/* Return 1 if we find something, 0 if not, -1 on error. */
int first_in_hash(struct tdb_context *tdb, int ltype,
		  struct traverse_info *tinfo,
		  TDB_DATA *kbuf, unsigned int *dlen)
{
	tinfo->prev = 0;
	tinfo->toplevel_group = 0;
	tinfo->num_levels = 1;
	tinfo->levels[0].hashtable = offsetof(struct tdb_header, hashtable);
	tinfo->levels[0].entry = 0;
	tinfo->levels[0].total_buckets = (1 << TDB_HASH_GROUP_BITS);

	return next_in_hash(tdb, ltype, tinfo, kbuf, dlen);
}
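/* A minimal traversal sketch using the two entry points above
 * (illustrative; assumes the buffer returned via tdb_alloc_read() is
 * heap-allocated and owned by the caller):
 *
 *	struct traverse_info tinfo;
 *	TDB_DATA k;
 *	unsigned int dlen;
 *	int ret;
 *
 *	for (ret = first_in_hash(tdb, F_RDLCK, &tinfo, &k, &dlen);
 *	     ret == 1;
 *	     ret = next_in_hash(tdb, F_RDLCK, &tinfo, &k, &dlen)) {
 *		// k.dptr holds k.dsize key bytes followed by dlen data bytes
 *		free(k.dptr);
 *	}
 *	if (ret == -1)
 *		...	// error
 */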