X-Git-Url: https://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Ftdb2%2Fhash.c;h=69df151e98e8100593d0a5582b8cfd84c09dac05;hp=51874918c62ef4fb53e5581c252e8ace6301de9a;hb=576802602c19ed3cfda98414ffc9b118c2675931;hpb=c5e3f07a30056cb7c3c380bf690f80815ca4b6b1

diff --git a/ccan/tdb2/hash.c b/ccan/tdb2/hash.c
index 51874918..69df151e 100644
--- a/ccan/tdb2/hash.c
+++ b/ccan/tdb2/hash.c
@@ -42,17 +42,19 @@ uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
 uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
 {
-	struct tdb_used_record pad, *r;
+	const struct tdb_used_record *r;
 	const void *key;
 	uint64_t klen, hash;
 
-	r = tdb_get(tdb, off, &pad, sizeof(pad));
+	r = tdb_access_read(tdb, off, sizeof(*r), true);
 	if (!r)
 		/* FIXME */
 		return 0;
 
 	klen = rec_key_length(r);
-	key = tdb_access_read(tdb, off + sizeof(pad), klen, false);
+	tdb_access_release(tdb, r);
+
+	key = tdb_access_read(tdb, off + sizeof(*r), klen, false);
 	if (!key)
 		return 0;
@@ -87,10 +89,6 @@ static bool match(struct tdb_context *tdb,
 	const unsigned char *rkey;
 	tdb_off_t off;
 
-	/* FIXME: Handle hash value truncated. */
-	if (bits(val, TDB_OFF_HASH_TRUNCATED_BIT, 1))
-		abort();
-
 	/* Desired bucket must match. */
 	if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK))
 		return false;
@@ -123,10 +121,9 @@ static tdb_off_t hbucket_off(tdb_off_t group_start, unsigned bucket)
 		+ (bucket % (1 << TDB_HASH_GROUP_BITS)) * sizeof(tdb_off_t);
 }
 
-/* Truncated hashes can't be all 1: that's how we spot a sub-hash */
 bool is_subhash(tdb_off_t val)
 {
-	return val >> (64-TDB_OFF_UPPER_STEAL) == (1<<TDB_OFF_UPPER_STEAL)-1;
+	return (val >> TDB_OFF_UPPER_STEAL_SUBHASH_BIT) & 1;
 }
 
 /* FIXME: Guess the depth, don't over-lock! */
@@ -239,8 +236,8 @@ fail:
 /* I wrote a simple test, expanding a hash to 2GB, for the following
  * cases:
  * 1) Expanding all the buckets at once,
- * 2) Expanding the most-populated bucket,
- * 3) Expanding the bucket we wanted to place the new entry ito.
+ * 2) Expanding the bucket we wanted to place the new entry into.
+ * 3) Expanding the most-populated bucket,
 *
 * I measured the worst/average/best density during this process.
 * 1) 3%/16%/30%
@@ -329,7 +326,6 @@ static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
 	if (hash_used + TDB_SUBLEVEL_HASH_BITS > 64)
 		abort();
 
-	/* FIXME: Do truncated hash bits if we can! */
 	h.h = hash_record(tdb, off);
 	gnum = use_bits(&h, TDB_SUBLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS);
 	h.group_start = subhash + sizeof(struct tdb_used_record)
@@ -358,6 +354,7 @@ static int expand_group(struct tdb_context *tdb, struct hash_info *h)
 	if (subhash == TDB_OFF_ERR)
 		return -1;
 
+	add_stat(tdb, alloc_subhash, 1);
 	if (zero_out(tdb, subhash + sizeof(struct tdb_used_record),
 		     sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS) == -1)
 		return -1;
@@ -377,7 +374,7 @@ static int expand_group(struct tdb_context *tdb, struct hash_info *h)
 	/* assert(num_vals); */
 
 	/* Overwrite expanded bucket with subhash pointer. */
-	h->group[bucket] = subhash | ~((1ULL << (64 - TDB_OFF_UPPER_STEAL))-1);
+	h->group[bucket] = subhash | (1ULL << TDB_OFF_UPPER_STEAL_SUBHASH_BIT);
 
 	/* Put values back. */
 	for (i = 0; i < num_vals; i++) {
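
For context, this commit replaces the old "truncated hash" convention, where a
sub-hash pointer was marked by setting all TDB_OFF_UPPER_STEAL stolen upper
bits of the offset to 1, with a single dedicated flag bit. The standalone
sketch below contrasts the two encodings; the constant values and the helper
name (TDB_OFF_UPPER_STEAL = 8, TDB_OFF_UPPER_STEAL_SUBHASH_BIT = 56,
tag_subhash) are illustrative assumptions, not taken from tdb2's private.h.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t tdb_off_t;

/* Assumed values, for illustration only. */
#define TDB_OFF_UPPER_STEAL		8
#define TDB_OFF_UPPER_STEAL_SUBHASH_BIT	56

/* Old scheme: all stolen upper bits set means "sub-hash pointer". */
static bool is_subhash_old(tdb_off_t val)
{
	return val >> (64 - TDB_OFF_UPPER_STEAL)
		== (1 << TDB_OFF_UPPER_STEAL) - 1;
}

/* New scheme: one stolen bit flags a sub-hash pointer, leaving the
 * remaining stolen bits free for other uses (e.g. extra hash bits). */
static bool is_subhash_new(tdb_off_t val)
{
	return (val >> TDB_OFF_UPPER_STEAL_SUBHASH_BIT) & 1;
}

/* Hypothetical helper mirroring the expand_group() hunk above. */
static tdb_off_t tag_subhash(tdb_off_t subhash)
{
	return subhash | (1ULL << TDB_OFF_UPPER_STEAL_SUBHASH_BIT);
}

int main(void)
{
	tdb_off_t off = 0x1234;

	assert(!is_subhash_new(off));
	assert(is_subhash_new(tag_subhash(off)));
	assert(is_subhash_old(off | ~((1ULL << (64 - TDB_OFF_UPPER_STEAL)) - 1)));
	return 0;
}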