tdb2: trivial optimization for free list
diff --git a/ccan/tdb2/free.c b/ccan/tdb2/free.c
index 8ff5d74a3fd7a8a400b243b9dd8a22c1dfe44af6..dca8ff10709c81e3848b0c1fb9e3e4431ed0996f 100644
--- a/ccan/tdb2/free.c
+++ b/ccan/tdb2/free.c
@@ -244,12 +244,15 @@ static size_t record_leftover(size_t keylen, size_t datalen,
        return leftover;
 }
 
-/* FIXME: Shortcut common case where tdb->flist == flist */
 static tdb_off_t flist_offset(struct tdb_context *tdb, unsigned int flist)
 {
-       tdb_off_t off = first_flist(tdb);
+       tdb_off_t off;
        unsigned int i;
 
+       if (likely(tdb->flist == flist))
+               return tdb->flist_off;
+
+       off = first_flist(tdb);
        for (i = 0; i < flist; i++)
                off = next_flist(tdb, off);
        return off;
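
The shortcut pays off because get_free() (later in this diff) caches the list it last allocated from in tdb->flist and tdb->flist_off. A minimal sketch of keeping that pair coherent, using a hypothetical helper name:

static void remember_flist(struct tdb_context *tdb,
			   unsigned int flist, tdb_off_t flist_off)
{
	/* Update both fields together so the fast path in
	 * flist_offset() never sees a stale pairing. */
	tdb->flist = flist;
	tdb->flist_off = flist_off;
}
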
@@ -262,6 +265,7 @@ static int coalesce(struct tdb_context *tdb,
        struct tdb_free_record pad, *r;
        tdb_off_t end;
 
+       add_stat(tdb, alloc_coalesce_tried, 1);
        end = off + sizeof(struct tdb_used_record) + data_len;
 
        while (end < tdb->map_size) {
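
add_stat() itself is not part of this diff. A plausible shape, assuming the counters live in an optional statistics structure hung off the context, so the instrumentation costs a single predictable branch when stats are disabled:

#define add_stat(tdb, counter, v)				\
	do {							\
		if (unlikely((tdb)->stats))			\
			(tdb)->stats->counter += (v);		\
	} while (0)
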
@@ -281,8 +285,10 @@ static int coalesce(struct tdb_context *tdb,
                nb_off = bucket_off(flist_offset(tdb, flist), bucket);
 
                /* We may be violating lock order here, so best effort. */
-               if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT) == -1)
+               if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT) == -1) {
+                       add_stat(tdb, alloc_coalesce_lockfail, 1);
                        break;
+               }
 
                /* Now we have lock, re-check. */
                r = tdb_get(tdb, end, &pad, sizeof(pad));
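
Because nb_off can sort before b_off in the global lock order, the neighbour's bucket lock is taken with TDB_LOCK_NOWAIT and coalescing is simply abandoned on contention; once the lock is held, everything read beforehand must be re-checked. The same best-effort pattern in a self-contained form, using a pthread mutex and hypothetical names:

#include <pthread.h>
#include <stdbool.h>

static bool try_merge(pthread_mutex_t *l, bool (*still_free)(void))
{
	if (pthread_mutex_trylock(l) != 0)
		return false;		/* contended: give up, not an error */
	if (!still_free()) {		/* raced: the record was reused */
		pthread_mutex_unlock(l);
		return false;
	}
	/* ... safe to merge here ... */
	pthread_mutex_unlock(l);
	return true;
}
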
@@ -292,12 +298,14 @@ static int coalesce(struct tdb_context *tdb,
                }
 
                if (unlikely(frec_magic(r) != TDB_FREE_MAGIC)) {
+                       add_stat(tdb, alloc_coalesce_race, 1);
                        tdb_unlock_free_bucket(tdb, nb_off);
                        break;
                }
 
                if (unlikely(frec_flist(r) != flist)
                    || unlikely(size_to_bucket(frec_len(r)) != bucket)) {
+                       add_stat(tdb, alloc_coalesce_race, 1);
                        tdb_unlock_free_bucket(tdb, nb_off);
                        break;
                }
@@ -309,6 +317,7 @@ static int coalesce(struct tdb_context *tdb,
 
                end += sizeof(struct tdb_used_record) + frec_len(r);
                tdb_unlock_free_bucket(tdb, nb_off);
+               add_stat(tdb, alloc_coalesce_num_merged, 1);
        }
 
        /* Didn't find any adjacent free? */
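
Each merge extends the running end by the neighbour's header plus its data length, so the reclaimed extent is everything from off to end. The arithmetic in isolation (illustration only, not part of the source):

static size_t coalesced_extent(size_t data_len,
			       const size_t *len, unsigned int n)
{
	size_t total = sizeof(struct tdb_used_record) + data_len;
	unsigned int i;

	/* Every merged neighbour contributes its own header too. */
	for (i = 0; i < n; i++)
		total += sizeof(struct tdb_used_record) + len[i];
	return total;
}
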
@@ -343,6 +352,7 @@ static int coalesce(struct tdb_context *tdb,
        if (tdb_access_commit(tdb, r) != 0)
                goto err;
 
+       add_stat(tdb, alloc_coalesce_succeeded, 1);
        tdb_unlock_free_bucket(tdb, b_off);
 
        if (add_free_record(tdb, off, end - off) == -1)
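
The merged record can map to a larger bucket than the one locked as b_off, so that lock is dropped first and add_free_record() is left to take the correct bucket lock itself. A rough sketch of that assumed behaviour:

/* Assumed shape of add_free_record(): pick the bucket for the new
 * length, lock it, and link the record in. */
static int add_free_record_sketch(struct tdb_context *tdb,
				  tdb_off_t off, tdb_len_t len)
{
	unsigned int bucket = size_to_bucket(len - sizeof(struct tdb_used_record));
	tdb_off_t b_off = bucket_off(flist_offset(tdb, tdb->flist), bucket);

	if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) != 0)
		return -1;
	/* ... write the free record and link it into the bucket ... */
	tdb_unlock_free_bucket(tdb, b_off);
	return 0;
}
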
@@ -368,6 +378,7 @@ static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
        double multiplier;
        size_t size = adjust_size(keylen, datalen);
 
+       add_stat(tdb, allocs, 1);
 again:
        b_off = bucket_off(flist_off, bucket);
 
@@ -450,15 +461,18 @@ again:
                if (tdb_write_convert(tdb, best_off, &rec, sizeof(rec)) != 0)
                        goto unlock_err;
 
-               tdb_unlock_free_bucket(tdb, b_off);
-
+               /* Bucket of leftover will be <= current bucket, so nested
+                * locking is allowed. */
                if (leftover) {
+                       add_stat(tdb, alloc_leftover, 1);
                        if (add_free_record(tdb,
                                            best_off + sizeof(rec)
                                            + frec_len(&best) - leftover,
                                            leftover))
-                               return TDB_OFF_ERR;
+                               best_off = TDB_OFF_ERR;
                }
+               tdb_unlock_free_bucket(tdb, b_off);
+
                return best_off;
        }
 
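
Two things change together here. The leftover is now handed back to its free list while b_off is still held, which the new comment justifies: the leftover's bucket is less than or equal to the current one, so the nesting respects lock order. That in turn means the error path may no longer return early; it records the failure in best_off and falls through, so the unlock runs on every path. The same single-exit shape in isolation (hypothetical names):

#include <stdbool.h>

static tdb_off_t finish_alloc(struct tdb_context *tdb, tdb_off_t b_off,
			      tdb_off_t best_off, bool leftover_failed)
{
	if (leftover_failed)
		best_off = TDB_OFF_ERR;		/* remember the failure... */
	tdb_unlock_free_bucket(tdb, b_off);	/* ...but always unlock */
	return best_off;
}
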
@@ -500,6 +514,10 @@ static tdb_off_t get_free(struct tdb_context *tdb,
                        if (off == TDB_OFF_ERR)
                                return TDB_OFF_ERR;
                        if (off != 0) {
+                               if (b == start_b)
+                                       add_stat(tdb, alloc_bucket_exact, 1);
+                               if (b == TDB_FREE_BUCKETS - 1)
+                                       add_stat(tdb, alloc_bucket_max, 1);
                                /* Worked?  Stay using this list. */
                                tdb->flist_off = flist_off;
                                tdb->flist = flist;
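
The two counters classify where a successful allocation came from: alloc_bucket_exact when the first bucket tried (start_b) already held a fit, alloc_bucket_max when the request was served from the final, catch-all bucket. A hypothetical reader for the new numbers, assuming a struct mirroring the counter names used in this diff:

#include <stdio.h>
#include <stdint.h>

struct alloc_stats {	/* hypothetical mirror of the counters */
	uint64_t allocs, alloc_bucket_exact, alloc_bucket_max;
};

static void print_alloc_stats(const struct alloc_stats *s)
{
	printf("allocs %llu: exact-bucket %llu, max-bucket %llu\n",
	       (unsigned long long)s->allocs,
	       (unsigned long long)s->alloc_bucket_exact,
	       (unsigned long long)s->alloc_bucket_max);
}
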
@@ -511,6 +529,7 @@ static tdb_off_t get_free(struct tdb_context *tdb,
                /* Hmm, try next list. */
                flist_off = next_flist(tdb, flist_off);
                flist++;
+
                if (flist_off == 0) {
                        wrapped = true;
                        flist_off = first_flist(tdb);
@@ -594,6 +613,7 @@ static int tdb_expand(struct tdb_context *tdb, tdb_len_t size)
        /* We need to drop this lock before adding free record. */
        tdb_unlock_expand(tdb, F_WRLCK);
 
+       add_stat(tdb, expands, 1);
        return add_free_record(tdb, old_size, wanted);
 }
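
Counting expands next to allocs makes growth behaviour visible: if the two rise together, the free lists are hardly ever satisfying requests and the file grows on almost every allocation. A hypothetical helper:

#include <stdint.h>

static double expand_fraction(uint64_t expands, uint64_t allocs)
{
	/* Fraction of allocations that had to grow the file. */
	return allocs ? (double)expands / (double)allocs : 0.0;
}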