tdb2: clean up logging

diff --git a/ccan/tdb2/hash.c b/ccan/tdb2/hash.c
index dec7aae554acbd0f21fd10bf44c886ef72aae9ae..a8a701ec507d1ab13de7551a96244389c38742e3 100644
--- a/ccan/tdb2/hash.c
+++ b/ccan/tdb2/hash.c
@@ -42,17 +42,19 @@ uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
 
 uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
 {
-       struct tdb_used_record pad, *r;
+       const struct tdb_used_record *r;
        const void *key;
        uint64_t klen, hash;
 
-       r = tdb_get(tdb, off, &pad, sizeof(pad));
+       r = tdb_access_read(tdb, off, sizeof(*r), true);
        if (!r)
                /* FIXME */
                return 0;
 
        klen = rec_key_length(r);
-       key = tdb_access_read(tdb, off + sizeof(pad), klen, false);
+       tdb_access_release(tdb, r);
+
+       key = tdb_access_read(tdb, off + sizeof(*r), klen, false);
        if (!key)
                return 0;
 
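The hunk above swaps tdb_get(), which always copied the record header into a caller-supplied pad buffer, for the tdb_access_read()/tdb_access_release() pair, which can hand back a pointer straight into the mapped file and only falls back to a copy when it must. A minimal, self-contained sketch of that borrow-or-copy idea follows; the struct layout and helper names are illustrative stand-ins, not tdb2's real internals.

#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical stand-in for an open, possibly-mmap'd database file. */
struct db_map {
        int fd;                 /* descriptor for the fallback read path */
        const char *base;       /* start of the mmap'd region, or NULL */
        size_t len;             /* mapped length */
};

/* Borrow 'n' bytes at 'off': a direct pointer into the map when possible
 * (zero copy), otherwise a heap copy that access_release() will free. */
static const void *access_read(struct db_map *db, off_t off, size_t n)
{
        char *copy;

        if (db->base && (size_t)off + n <= db->len)
                return db->base + off;

        copy = malloc(n);
        if (copy && pread(db->fd, copy, n, off) != (ssize_t)n) {
                free(copy);
                return NULL;
        }
        return copy;
}

/* Free the temporary copy; pointers into the map need no cleanup. */
static void access_release(struct db_map *db, const void *p)
{
        const char *cp = p;

        if (!db->base || cp < db->base || cp >= db->base + db->len)
                free((void *)cp);
}

Releasing the header as soon as rec_key_length() has been read, as the new hash_record() does, keeps only one borrowed region live at a time before the key itself is accessed.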
@@ -87,10 +89,6 @@ static bool match(struct tdb_context *tdb,
        const unsigned char *rkey;
        tdb_off_t off;
 
-       /* FIXME: Handle hash value truncated. */
-       if (bits(val, TDB_OFF_HASH_TRUNCATED_BIT, 1))
-               abort();
-
        /* Desired bucket must match. */
        if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK))
                return false;
@@ -123,10 +121,16 @@ static tdb_off_t hbucket_off(tdb_off_t group_start, unsigned bucket)
                + (bucket % (1 << TDB_HASH_GROUP_BITS)) * sizeof(tdb_off_t);
 }
 
-/* Truncated hashes can't be all 1: that's how we spot a sub-hash */
 bool is_subhash(tdb_off_t val)
 {
-       return val >> (64-TDB_OFF_UPPER_STEAL) == (1<<TDB_OFF_UPPER_STEAL) - 1;
+       return (val >> TDB_OFF_UPPER_STEAL_SUBHASH_BIT) & 1;
+}
+
+/* FIXME: Guess the depth, don't over-lock! */
+static tdb_off_t hlock_range(tdb_off_t group, tdb_off_t *size)
+{
+       *size = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
+       return group << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
 }
 
 /* This is the core routine which searches the hashtable for an entry.
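The new hlock_range() helper above centralises the arithmetic that turns a top-level group number into the slice of hash-lock space that group owns; the FIXME carried over from find_and_lock() notes that this still locks the whole top-level slice rather than guessing the tree depth. A self-contained sketch of that arithmetic, using assumed values for the two bit-count constants (the real ones live in tdb2's private headers):

#include <assert.h>
#include <stdint.h>

#define TOPLEVEL_HASH_BITS 10   /* assumed value */
#define HASH_GROUP_BITS     2   /* assumed value */

/* Each top-level group owns a contiguous 1/2^(TOPLEVEL_HASH_BITS -
 * HASH_GROUP_BITS) fraction of the 64-bit hash space; the group number
 * selects which slice. */
static uint64_t hlock_range(uint64_t group, uint64_t *size)
{
        *size = 1ULL << (64 - (TOPLEVEL_HASH_BITS - HASH_GROUP_BITS));
        return group << (64 - (TOPLEVEL_HASH_BITS - HASH_GROUP_BITS));
}

int main(void)
{
        uint64_t size, start = hlock_range(3, &size);

        /* The slices tile the space: group 3 starts where group 2 ends. */
        assert(start == 3 * size);
        return 0;
}

Because find_and_lock() below and the new chainlock()/tdb_chainunlock() paths at the end of the file all go through this one helper, they agree on which byte range of the lock space covers a given key.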
@@ -149,11 +153,7 @@ tdb_off_t find_and_lock(struct tdb_context *tdb,
        group = use_bits(h, TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
        h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
 
-       /* FIXME: Guess the depth, don't over-lock! */
-       h->hlock_start = (tdb_off_t)group
-               << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
-       h->hlock_range = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS
-                                       - TDB_HASH_GROUP_BITS));
+       h->hlock_start = hlock_range(group, &h->hlock_range);
        if (tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
                            TDB_LOCK_WAIT))
                return TDB_OFF_ERR;
@@ -236,8 +236,8 @@ fail:
 /* I wrote a simple test, expanding a hash to 2GB, for the following
  * cases:
  * 1) Expanding all the buckets at once,
- * 2) Expanding the most-populated bucket,
- * 3) Expanding the bucket we wanted to place the new entry ito.
+ * 2) Expanding the bucket we wanted to place the new entry into,
+ * 3) Expanding the most-populated bucket.
  *
  * I measured the worst/average/best density during this process.
  * 1) 3%/16%/30%
@@ -326,7 +326,6 @@ static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
        if (hash_used + TDB_SUBLEVEL_HASH_BITS > 64)
                abort();
 
-       /* FIXME: Do truncated hash bits if we can! */
        h.h = hash_record(tdb, off);
        gnum = use_bits(&h, TDB_SUBLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS);
        h.group_start = subhash + sizeof(struct tdb_used_record)
@@ -355,6 +354,7 @@ static int expand_group(struct tdb_context *tdb, struct hash_info *h)
        if (subhash == TDB_OFF_ERR)
                return -1;
 
+       add_stat(tdb, alloc_subhash, 1);
        if (zero_out(tdb, subhash + sizeof(struct tdb_used_record),
                     sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS) == -1)
                return -1;
@@ -374,7 +374,7 @@ static int expand_group(struct tdb_context *tdb, struct hash_info *h)
        /* assert(num_vals); */
 
        /* Overwrite expanded bucket with subhash pointer. */
-       h->group[bucket] = subhash | ~((1ULL << (64 - TDB_OFF_UPPER_STEAL))-1);
+       h->group[bucket] = subhash | (1ULL << TDB_OFF_UPPER_STEAL_SUBHASH_BIT);
 
        /* Put values back. */
        for (i = 0; i < num_vals; i++) {
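The expanded bucket is now flagged as a sub-hashtable by setting a single stolen bit, tested by the rewritten is_subhash() earlier in this diff, instead of forcing every stolen upper bit to 1. A tiny self-contained sketch of that mark-and-test scheme; the bit position used here is an assumed value, not necessarily TDB_OFF_UPPER_STEAL_SUBHASH_BIT:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SUBHASH_BIT 62   /* assumed position of the stolen flag bit */

/* Mark an offset word as pointing at a sub-hashtable. */
static uint64_t mark_subhash(uint64_t off)
{
        return off | (1ULL << SUBHASH_BIT);
}

/* Test the flag; ordinary record offsets leave the bit clear. */
static bool is_subhash(uint64_t val)
{
        return (val >> SUBHASH_BIT) & 1;
}

int main(void)
{
        uint64_t off = 0x1000;

        assert(!is_subhash(off));
        assert(is_subhash(mark_subhash(off)));
        return 0;
}

Using one dedicated bit makes the subhash test a simple bit probe rather than a comparison against an all-ones pattern in the stolen upper bits.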
@@ -561,10 +561,11 @@ int next_in_hash(struct tdb_context *tdb, int ltype,
                                return -1;
                        }
                        if (rec_magic(&rec) != TDB_MAGIC) {
-                               tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
-                                        "next_in_hash:"
-                                        " corrupt record at %llu\n",
-                                        (long long)off);
+                               tdb_logerr(tdb, TDB_ERR_CORRUPT,
+                                          TDB_DEBUG_FATAL,
+                                          "next_in_hash:"
+                                          " corrupt record at %llu",
+                                          (long long)off);
                                return -1;
                        }
 
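This hunk is the change the commit subject describes: the direct tdb->log callback invocation is replaced by a tdb_logerr() call that also records an error code (TDB_ERR_CORRUPT) and drops the trailing newline from the format string. A hedged sketch of what such a set-error-and-log helper could look like; the cut-down context struct and field names are assumptions, not tdb2's actual definitions:

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical cut-down context; the real struct tdb_context differs. */
struct ctx {
        int ecode;                      /* last error code seen */
        void (*log)(struct ctx *c, int level, void *priv, const char *msg);
        void *log_priv;
};

/* Record the error code, format the message once, then hand the finished
 * string to the registered log callback, if there is one. */
static void logerr(struct ctx *c, int ecode, int level, const char *fmt, ...)
{
        va_list ap;
        char msg[256];

        va_start(ap, fmt);
        vsnprintf(msg, sizeof(msg), fmt, ap);
        va_end(ap);

        c->ecode = ecode;
        if (c->log)
                c->log(c, level, c->log_priv, msg);
}

Folding the error code into the logging call keeps every failure path to a single statement, so callers like next_in_hash() only have to return -1 afterwards.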
@@ -610,3 +611,46 @@ int first_in_hash(struct tdb_context *tdb, int ltype,
 
        return next_in_hash(tdb, ltype, tinfo, kbuf, dlen);
 }
+
+/* Even if the entry isn't in this hash bucket, you'd have to lock this
+ * bucket to find it. */
+static int chainlock(struct tdb_context *tdb, const TDB_DATA *key,
+                    int ltype, enum tdb_lock_flags waitflag,
+                    const char *func)
+{
+       int ret;
+       uint64_t h = tdb_hash(tdb, key->dptr, key->dsize);
+       tdb_off_t lockstart, locksize;
+       unsigned int group, gbits;
+
+       gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
+       group = bits(h, 64 - gbits, gbits);
+
+       lockstart = hlock_range(group, &locksize);
+
+       ret = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag);
+       tdb_trace_1rec(tdb, func, *key);
+       return ret;
+}
+
+/* lock/unlock one hash chain. This is meant to be used to reduce
+   contention - it cannot guarantee how many records will be locked */
+int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
+{
+       return chainlock(tdb, &key, F_WRLCK, TDB_LOCK_WAIT, "tdb_chainlock");
+}
+
+int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
+{
+       uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
+       tdb_off_t lockstart, locksize;
+       unsigned int group, gbits;
+
+       gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
+       group = bits(h, 64 - gbits, gbits);
+
+       lockstart = hlock_range(group, &locksize);
+
+       tdb_trace_1rec(tdb, "tdb_chainunlock", key);
+       return tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK);
+}