tdb2: remove tdb_get()
diff --git a/ccan/tdb2/hash.c b/ccan/tdb2/hash.c
index 0fd4774939d0399b9aa1706a3bf4d01b53c69bbd..f8d6b5812ebfb5307e35f3203dbe0ded9abdbde5 100644
--- a/ccan/tdb2/hash.c
+++ b/ccan/tdb2/hash.c
@@ -42,17 +42,19 @@ uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
 
 uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
 {
-       struct tdb_used_record pad, *r;
+       const struct tdb_used_record *r;
        const void *key;
        uint64_t klen, hash;
 
-       r = tdb_get(tdb, off, &pad, sizeof(pad));
+       r = tdb_access_read(tdb, off, sizeof(*r), true);
        if (!r)
                /* FIXME */
                return 0;
 
        klen = rec_key_length(r);
-       key = tdb_access_read(tdb, off + sizeof(pad), klen, false);
+       tdb_access_release(tdb, r);
+
+       key = tdb_access_read(tdb, off + sizeof(*r), klen, false);
        if (!key)
                return 0;
 
@@ -129,6 +131,13 @@ bool is_subhash(tdb_off_t val)
        return val >> (64-TDB_OFF_UPPER_STEAL) == (1<<TDB_OFF_UPPER_STEAL) - 1;
 }
 
+/* FIXME: Guess the depth, don't over-lock! */
+static tdb_off_t hlock_range(tdb_off_t group, tdb_off_t *size)
+{
+       *size = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
+       return group << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
+}
+
 /* This is the core routine which searches the hashtable for an entry.
  * On error, no locks are held and TDB_OFF_ERR is returned.
  * Otherwise, hinfo is filled in (and the optional tinfo).
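The new hlock_range() helper carves the 64-bit hash lock space into one slice per toplevel group. A minimal sketch of the arithmetic, assuming TDB_TOPLEVEL_HASH_BITS is 10 and TDB_HASH_GROUP_BITS is 4 (those constants are not shown in this diff), so each group owns a 2^58-wide range:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Assumed values; the real constants live in tdb2's private header. */
	const unsigned gbits = 10 - 4;	/* TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS */
	uint64_t size = 1ULL << (64 - gbits);		/* 2^58 offsets per group */
	uint64_t start = (uint64_t)3 << (64 - gbits);	/* group 3's first offset */

	assert(size == 1ULL << 58);
	assert(start == 3 * size);	/* consecutive groups tile the space */
	return 0;
}
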
@@ -149,11 +158,7 @@ tdb_off_t find_and_lock(struct tdb_context *tdb,
        group = use_bits(h, TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
        h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
 
-       /* FIXME: Guess the depth, don't over-lock! */
-       h->hlock_start = (tdb_off_t)group
-               << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
-       h->hlock_range = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS
-                                       - TDB_HASH_GROUP_BITS));
+       h->hlock_start = hlock_range(group, &h->hlock_range);
        if (tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
                            TDB_LOCK_WAIT))
                return TDB_OFF_ERR;
@@ -355,6 +360,7 @@ static int expand_group(struct tdb_context *tdb, struct hash_info *h)
        if (subhash == TDB_OFF_ERR)
                return -1;
 
+       add_stat(tdb, alloc_subhash, 1);
        if (zero_out(tdb, subhash + sizeof(struct tdb_used_record),
                     sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS) == -1)
                return -1;
@@ -537,7 +543,7 @@ again:
 /* Return 1 if we find something, 0 if not, -1 on error. */
 int next_in_hash(struct tdb_context *tdb, int ltype,
                 struct traverse_info *tinfo,
-                TDB_DATA *kbuf, unsigned int *dlen)
+                TDB_DATA *kbuf, size_t *dlen)
 {
        const unsigned group_bits = TDB_TOPLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS;
        tdb_off_t hlock_start, hlock_range, off;
@@ -560,6 +566,14 @@ int next_in_hash(struct tdb_context *tdb, int ltype,
                                                  ltype);
                                return -1;
                        }
+                       if (rec_magic(&rec) != TDB_MAGIC) {
+                               tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+                                        "next_in_hash:"
+                                        " corrupt record at %llu\n",
+                                        (long long)off);
+                               return -1;
+                       }
+
                        kbuf->dsize = rec_key_length(&rec);
 
                        /* They want data as well? */
@@ -591,7 +605,7 @@ int next_in_hash(struct tdb_context *tdb, int ltype,
 /* Return 1 if we find something, 0 if not, -1 on error. */
 int first_in_hash(struct tdb_context *tdb, int ltype,
                  struct traverse_info *tinfo,
-                 TDB_DATA *kbuf, unsigned int *dlen)
+                 TDB_DATA *kbuf, size_t *dlen)
 {
        tinfo->prev = 0;
        tinfo->toplevel_group = 0;
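first_in_hash()/next_in_hash() return 1 when a record is found, 0 at the end of the table and -1 on error, as the comments above say. A sketch of the loop a caller inside tdb2 might build on top of them, assuming the key buffer handed back in kbuf is allocated for the caller (as the removed tdb_firstkey()/tdb_nextkey() below suggest):

/* Sketch only: walk every key with the traversal helpers. */
static int walk_all_keys(struct tdb_context *tdb)
{
	struct traverse_info tinfo;
	TDB_DATA k;
	int ret;

	for (ret = first_in_hash(tdb, F_RDLCK, &tinfo, &k, NULL);
	     ret == 1;
	     ret = next_in_hash(tdb, F_RDLCK, &tinfo, &k, NULL)) {
		/* ... look at k.dptr / k.dsize ... */
		free(k.dptr);	/* assuming the caller owns the buffer */
	}
	return ret;	/* 0 after a complete walk, -1 on error */
}
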
@@ -603,40 +617,45 @@ int first_in_hash(struct tdb_context *tdb, int ltype,
        return next_in_hash(tdb, ltype, tinfo, kbuf, dlen);
 }
 
-TDB_DATA tdb_firstkey(struct tdb_context *tdb)
+/* Even if the entry isn't in this hash bucket, you'd have to lock this
+ * bucket to find it. */
+static int chainlock(struct tdb_context *tdb, const TDB_DATA *key,
+                    int ltype, enum tdb_lock_flags waitflag,
+                    const char *func)
 {
-       struct traverse_info tinfo;
-       struct tdb_data k;
-       switch (first_in_hash(tdb, F_RDLCK, &tinfo, &k, NULL)) {
-       case 1:
-               return k;
-       case 0:
-               tdb->ecode = TDB_SUCCESS;
-               /* Fall thru... */
-       default:
-               return tdb_null;
-       }
-}              
+       int ret;
+       uint64_t h = tdb_hash(tdb, key->dptr, key->dsize);
+       tdb_off_t lockstart, locksize;
+       unsigned int group, gbits;
+
+       gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
+       group = bits(h, 64 - gbits, gbits);
+
+       lockstart = hlock_range(group, &locksize);
+
+       ret = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag);
+       tdb_trace_1rec(tdb, func, *key);
+       return ret;
+}
 
-/* We lock twice, not very efficient.  We could keep last key & tinfo cached. */
-TDB_DATA tdb_nextkey(struct tdb_context *tdb, TDB_DATA key)
+/* lock/unlock one hash chain. This is meant to be used to reduce
+   contention - it cannot guarantee how many records will be locked */
+int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
 {
-       struct traverse_info tinfo;
-       struct hash_info h;
-       struct tdb_used_record rec;
-
-       tinfo.prev = find_and_lock(tdb, key, F_RDLCK, &h, &rec, &tinfo);
-       if (unlikely(tinfo.prev == TDB_OFF_ERR))
-               return tdb_null;
-       tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_RDLCK);
-
-       switch (next_in_hash(tdb, F_RDLCK, &tinfo, &key, NULL)) {
-       case 1:
-               return key;
-       case 0:
-               tdb->ecode = TDB_SUCCESS;
-               /* Fall thru... */
-       default:
-               return tdb_null;
-       }
-}              
+       return chainlock(tdb, &key, F_WRLCK, TDB_LOCK_WAIT, "tdb_chainlock");
+}
+
+int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
+{
+       uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
+       tdb_off_t lockstart, locksize;
+       unsigned int group, gbits;
+
+       gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
+       group = bits(h, 64 - gbits, gbits);
+
+       lockstart = hlock_range(group, &locksize);
+
+       tdb_trace_1rec(tdb, "tdb_chainunlock", key);
+       return tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK);
+}
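
tdb_chainlock()/tdb_chainunlock() only serialise access to the one bucket range the key hashes into, as the comment above notes. A minimal sketch of the intended bracketing, in which everything except the two new calls is an assumption (an already-open tdb and a hypothetical update helper):

static int locked_update(struct tdb_context *tdb, TDB_DATA key)
{
	if (tdb_chainlock(tdb, key) != 0)	/* assuming 0 means success */
		return -1;

	/* read/modify/write the record for "key" while the chain is held */
	do_update(tdb, key);			/* hypothetical helper */

	return tdb_chainunlock(tdb, key);
}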