tdb2: add comparison stats
diff --git a/ccan/tdb2/hash.c b/ccan/tdb2/hash.c
index d3a195a789f5fcdd76a957e8069e03d2eb0176e3..d4d08e420d878f6e35f60fac9cac1bbbcb5a840f 100644
--- a/ccan/tdb2/hash.c
+++ b/ccan/tdb2/hash.c
@@ -42,17 +42,19 @@ uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
 
 uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
 {
-       struct tdb_used_record pad, *r;
+       const struct tdb_used_record *r;
        const void *key;
        uint64_t klen, hash;
 
-       r = tdb_get(tdb, off, &pad, sizeof(pad));
+       r = tdb_access_read(tdb, off, sizeof(*r), true);
        if (!r)
                /* FIXME */
                return 0;
 
        klen = rec_key_length(r);
-       key = tdb_access_read(tdb, off + sizeof(pad), klen, false);
+       tdb_access_release(tdb, r);
+
+       key = tdb_access_read(tdb, off + sizeof(*r), klen, false);
        if (!key)
                return 0;
 
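The hunk above replaces a copy into a stack pad (tdb_get()) with tdb2's direct-access pattern: tdb_access_read() returns a pointer either straight into the mmapped file or to a temporary copy, and every such pointer must be paired with tdb_access_release(), which is why the header is now released before the key is mapped. A minimal, self-contained sketch of that pattern, using hypothetical names (struct db, db_access_read/db_access_release) rather than the tdb2 internals:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct db {
        const char *file;       /* backing bytes, standing in for the file */
        size_t file_len;
        bool mapped;            /* pretend the file is mmapped? */
};

static const void *db_access_read(const struct db *db, size_t off, size_t len)
{
        char *copy;

        if (off + len > db->file_len)
                return NULL;
        if (db->mapped)
                return db->file + off;  /* zero-copy pointer into the "map" */
        copy = malloc(len);
        if (copy)
                memcpy(copy, db->file + off, len);
        return copy;
}

static void db_access_release(const struct db *db, const void *p)
{
        const char *c = p;

        /* Copies must be freed; pointers into the map must not be. */
        if (!(c >= db->file && c < db->file + db->file_len))
                free((void *)c);
}

Releasing the header before mapping the key, as the new hash_record() does, keeps at most one outstanding access alive at a time.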
@@ -83,36 +85,42 @@ static bool match(struct tdb_context *tdb,
                  tdb_off_t val,
                  struct tdb_used_record *rec)
 {
-       bool ret;
+       bool ret = false;
        const unsigned char *rkey;
        tdb_off_t off;
 
-       /* FIXME: Handle hash value truncated. */
-       if (bits(val, TDB_OFF_HASH_TRUNCATED_BIT, 1))
-               abort();
-
+       add_stat(tdb, compares, 1);
        /* Desired bucket must match. */
-       if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK))
-               return false;
+       if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK)) {
+               add_stat(tdb, compare_wrong_bucket, 1);
+               return ret;
+       }
 
        /* Top bits of offset == next bits of hash. */
        if (bits(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
            != bits(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
-                   TDB_OFF_UPPER_STEAL_EXTRA))
-               return false;
+                   TDB_OFF_UPPER_STEAL_EXTRA)) {
+               add_stat(tdb, compare_wrong_offsetbits, 1);
+               return ret;
+       }
 
        off = val & TDB_OFF_MASK;
        if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
-               return false;
+               return ret;
 
        /* FIXME: check extra bits in header? */
-       if (rec_key_length(rec) != key->dsize)
-               return false;
+       if (rec_key_length(rec) != key->dsize) {
+               add_stat(tdb, compare_wrong_keylen, 1);
+               return ret;
+       }
 
        rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
        if (!rkey)
-               return false;
-       ret = (memcmp(rkey, key->dptr, key->dsize) == 0);
+               return ret;
+       if (memcmp(rkey, key->dptr, key->dsize) == 0)
+               ret = true;
+       else
+               add_stat(tdb, compare_wrong_keycmp, 1);
        tdb_access_release(tdb, rkey);
        return ret;
 }
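add_stat() itself is defined elsewhere in tdb2. A hedged sketch of what such a counter macro typically looks like, assuming a stats structure with one uint64_t field per counter (only the field names are taken from the calls in this diff; the struct layout and NULL-pointer convention are assumptions):

#include <stdint.h>

struct tdb_stats_sketch {
        uint64_t compares;
        uint64_t compare_wrong_bucket;
        uint64_t compare_wrong_offsetbits;
        uint64_t compare_wrong_keylen;
        uint64_t compare_wrong_keycmp;
        uint64_t alloc_subhash;
};

struct tdb_ctx_sketch {
        struct tdb_stats_sketch *stats; /* NULL when stats are disabled */
};

/* Passing the field name as a macro argument keeps call sites terse:
 * add_stat(tdb, compares, 1).  The NULL check makes the counters
 * essentially free when nobody asked for them. */
#define add_stat(tdb, field, inc)                               \
        do {                                                    \
                if ((tdb)->stats)                               \
                        (tdb)->stats->field += (inc);           \
        } while (0)

With counters like these, the ratio of compare_wrong_bucket to compares shows how often the cheap home-bucket filter saves a record read.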
@@ -123,10 +131,16 @@ static tdb_off_t hbucket_off(tdb_off_t group_start, unsigned bucket)
                + (bucket % (1 << TDB_HASH_GROUP_BITS)) * sizeof(tdb_off_t);
 }
 
-/* Truncated hashes can't be all 1: that's how we spot a sub-hash */
 bool is_subhash(tdb_off_t val)
 {
-       return val >> (64-TDB_OFF_UPPER_STEAL) == (1<<TDB_OFF_UPPER_STEAL) - 1;
+       return (val >> TDB_OFF_UPPER_STEAL_SUBHASH_BIT) & 1;
+}
+
+/* FIXME: Guess the depth, don't over-lock! */
+static tdb_off_t hlock_range(tdb_off_t group, tdb_off_t *size)
+{
+       *size = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
+       return group << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
 }
 
 /* This is the core routine which searches the hashtable for an entry.
@@ -149,11 +163,7 @@ tdb_off_t find_and_lock(struct tdb_context *tdb,
        group = use_bits(h, TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
        h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
 
-       /* FIXME: Guess the depth, don't over-lock! */
-       h->hlock_start = (tdb_off_t)group
-               << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
-       h->hlock_range = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS
-                                       - TDB_HASH_GROUP_BITS));
+       h->hlock_start = hlock_range(group, &h->hlock_range);
        if (tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
                            TDB_LOCK_WAIT))
                return TDB_OFF_ERR;
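The two hunks above pull the lock-range arithmetic into the new hlock_range() helper: a top-level group number is turned into the span of 64-bit hash values whose entries can fall under that group. A worked example of the arithmetic with illustrative constants (TDB_TOPLEVEL_HASH_BITS = 10 and TDB_HASH_GROUP_BITS = 2 are assumptions for this example, not necessarily tdb2's real values):

#include <stdint.h>
#include <stdio.h>

#define TOPLEVEL_HASH_BITS 10   /* illustrative only */
#define HASH_GROUP_BITS     2

int main(void)
{
        unsigned shift = 64 - (TOPLEVEL_HASH_BITS - HASH_GROUP_BITS);
        uint64_t group = 5;
        uint64_t size  = 1ULL << shift;         /* 2^56 hashes per group */
        uint64_t start = group << shift;        /* first hash in group 5 */

        printf("group %llu locks hash range [%#llx, %#llx)\n",
               (unsigned long long)group,
               (unsigned long long)start,
               (unsigned long long)(start + size));
        return 0;
}

As the FIXME notes, this over-locks: the full top-level range is taken even when the chain beneath it is shallow.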
@@ -236,8 +246,8 @@ fail:
 /* I wrote a simple test, expanding a hash to 2GB, for the following
  * cases:
  * 1) Expanding all the buckets at once,
- * 2) Expanding the most-populated bucket,
- * 3) Expanding the bucket we wanted to place the new entry ito.
+ * 2) Expanding the bucket we wanted to place the new entry into.
+ * 3) Expanding the most-populated bucket,
  *
  * I measured the worst/average/best density during this process.
  * 1) 3%/16%/30%
@@ -326,7 +336,6 @@ static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
        if (hash_used + TDB_SUBLEVEL_HASH_BITS > 64)
                abort();
 
-       /* FIXME: Do truncated hash bits if we can! */
        h.h = hash_record(tdb, off);
        gnum = use_bits(&h, TDB_SUBLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS);
        h.group_start = subhash + sizeof(struct tdb_used_record)
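Walking the hash tree is driven by use_bits(), which consumes bits from the top of the 64-bit hash as each level is descended; chainlock() later in this diff reproduces the same extraction with bits(h, 64 - gbits, gbits). A hedged reconstruction of those two helpers, matching how the calls in this diff behave (the real definitions live in hash.c and the private header):

#include <stdint.h>

struct hash_info_sketch {
        uint64_t h;             /* full 64-bit hash of the key */
        unsigned hash_used;     /* bits consumed so far, from the top */
};

/* Extract "num" bits of "val", starting "start" bits from the bottom. */
static uint64_t bits(uint64_t val, unsigned start, unsigned num)
{
        return (val >> start) & ((1ULL << num) - 1);
}

/* Consume the next "num" bits from the top of the hash: the top level
 * eats the first bits, each sub-level eats the ones after it.  The
 * abort() above fires when a tree is so deep that all 64 bits are gone. */
static uint64_t use_bits(struct hash_info_sketch *h, unsigned num)
{
        h->hash_used += num;
        return bits(h->h, 64 - h->hash_used, num);
}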
@@ -355,6 +364,7 @@ static int expand_group(struct tdb_context *tdb, struct hash_info *h)
        if (subhash == TDB_OFF_ERR)
                return -1;
 
+       add_stat(tdb, alloc_subhash, 1);
        if (zero_out(tdb, subhash + sizeof(struct tdb_used_record),
                     sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS) == -1)
                return -1;
@@ -374,7 +384,7 @@ static int expand_group(struct tdb_context *tdb, struct hash_info *h)
        /* assert(num_vals); */
 
        /* Overwrite expanded bucket with subhash pointer. */
-       h->group[bucket] = subhash | ~((1ULL << (64 - TDB_OFF_UPPER_STEAL))-1);
+       h->group[bucket] = subhash | (1ULL << TDB_OFF_UPPER_STEAL_SUBHASH_BIT);
 
        /* Put values back. */
        for (i = 0; i < num_vals; i++) {
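Together with the new is_subhash() above, this hunk switches sub-hash marking from "all TDB_OFF_UPPER_STEAL top bits set" to a single dedicated flag bit. A standalone illustration of the new scheme, assuming bit 62 for TDB_OFF_UPPER_STEAL_SUBHASH_BIT (the real position is defined in tdb2's private header):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SUBHASH_BIT 62          /* assumed position, for illustration */

static uint64_t mark_subhash(uint64_t off)
{
        return off | (1ULL << SUBHASH_BIT);     /* as expand_group() does */
}

static bool is_subhash_entry(uint64_t val)
{
        return (val >> SUBHASH_BIT) & 1;        /* as is_subhash() does */
}

int main(void)
{
        uint64_t off = 0x1234000;       /* an ordinary record offset */

        assert(!is_subhash_entry(off));
        assert(is_subhash_entry(mark_subhash(off)));
        return 0;
}

A single flag bit is cheaper to test than an all-ones pattern and leaves the remaining stolen top bits free for other uses.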
@@ -560,6 +570,15 @@ int next_in_hash(struct tdb_context *tdb, int ltype,
                                                  ltype);
                                return -1;
                        }
+                       if (rec_magic(&rec) != TDB_MAGIC) {
+                               tdb_logerr(tdb, TDB_ERR_CORRUPT,
+                                          TDB_DEBUG_FATAL,
+                                          "next_in_hash:"
+                                          " corrupt record at %llu",
+                                          (long long)off);
+                               return -1;
+                       }
+
                        kbuf->dsize = rec_key_length(&rec);
 
                        /* They want data as well? */
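The new check validates the record's magic before rec_key_length() is trusted, so a corrupt header cannot feed a bogus length into the key copy. A generic sketch of the idea with a hypothetical flat header (tdb2's real header packs the magic into a bitfield, so this shows the shape of the check, not the real layout):

#include <stdint.h>
#include <stdio.h>

#define REC_MAGIC 0x618e951dULL         /* arbitrary example value */

struct rec_header_sketch {
        uint64_t magic;                 /* written alongside every record */
        uint64_t key_len;
        uint64_t data_len;
};

/* Check the magic before believing any length field: a corrupt header
 * is reported instead of being used to size a read. */
static int check_header(const struct rec_header_sketch *r, uint64_t off)
{
        if (r->magic != REC_MAGIC) {
                fprintf(stderr, "corrupt record at %llu\n",
                        (unsigned long long)off);
                return -1;
        }
        return 0;
}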
@@ -602,3 +621,46 @@ int first_in_hash(struct tdb_context *tdb, int ltype,
 
        return next_in_hash(tdb, ltype, tinfo, kbuf, dlen);
 }
+
+/* Even if the entry isn't in this hash bucket, you'd have to lock this
+ * bucket to find it. */
+static int chainlock(struct tdb_context *tdb, const TDB_DATA *key,
+                    int ltype, enum tdb_lock_flags waitflag,
+                    const char *func)
+{
+       int ret;
+       uint64_t h = tdb_hash(tdb, key->dptr, key->dsize);
+       tdb_off_t lockstart, locksize;
+       unsigned int group, gbits;
+
+       gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
+       group = bits(h, 64 - gbits, gbits);
+
+       lockstart = hlock_range(group, &locksize);
+
+       ret = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag);
+       tdb_trace_1rec(tdb, func, *key);
+       return ret;
+}
+
+/* lock/unlock one hash chain. This is meant to be used to reduce
+   contention - it cannot guarantee how many records will be locked */
+int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
+{
+       return chainlock(tdb, &key, F_WRLCK, TDB_LOCK_WAIT, "tdb_chainlock");
+}
+
+int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
+{
+       uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
+       tdb_off_t lockstart, locksize;
+       unsigned int group, gbits;
+
+       gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
+       group = bits(h, 64 - gbits, gbits);
+
+       lockstart = hlock_range(group, &locksize);
+
+       tdb_trace_1rec(tdb, "tdb_chainunlock", key);
+       return tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK);
+}
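tdb_chainlock()/tdb_chainunlock() are part of tdb's public API; the typical use is an atomic read-modify-write on one key without taking the database-wide allrecord lock. A hedged usage sketch (the header name "tdb2.h" and the int-returning tdb_store()/tdb_chainlock() signatures are assumptions about this era of the tree; check the public header before relying on them):

#include <stdint.h>
#include <string.h>
#include "tdb2.h"       /* assumed header name */

/* Atomically replace one record, serialized against other writers of
 * any key hashing into the same locked range. */
static int bump(struct tdb_context *tdb, const char *name, uint64_t n)
{
        TDB_DATA key, val;
        int ret;

        key.dptr = (unsigned char *)name;
        key.dsize = strlen(name);
        val.dptr = (unsigned char *)&n;
        val.dsize = sizeof(n);

        if (tdb_chainlock(tdb, key) != 0)
                return -1;
        ret = tdb_store(tdb, key, val, TDB_REPLACE);
        tdb_chainunlock(tdb, key);
        return ret;
}

As the comment in the diff warns, the lock covers a whole top-level hash range rather than exactly one chain, so unrelated keys may briefly contend; the payoff is much finer granularity than locking the whole database.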