tdb2: minor optimization for set_header
diff --git a/ccan/tdb2/free.c b/ccan/tdb2/free.c
index 5284510a07fbdfd3aa5341360c29448e3eb9f183..e6e77bf616666deed3940b07707616485bd29812 100644
--- a/ccan/tdb2/free.c
+++ b/ccan/tdb2/free.c
 */
 #include "private.h"
 #include <ccan/likely/likely.h>
+#include <ccan/ilog/ilog.h>
 #include <time.h>
 #include <assert.h>
 #include <limits.h>
 
 static unsigned fls64(uint64_t val)
 {
-#if HAVE_BUILTIN_CLZL
-       if (val <= ULONG_MAX) {
-               /* This is significantly faster! */
-               return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
-       } else {
-#endif
-       uint64_t r = 64;
-
-       if (!val)
-               return 0;
-       if (!(val & 0xffffffff00000000ull)) {
-               val <<= 32;
-               r -= 32;
-       }
-       if (!(val & 0xffff000000000000ull)) {
-               val <<= 16;
-               r -= 16;
-       }
-       if (!(val & 0xff00000000000000ull)) {
-               val <<= 8;
-               r -= 8;
-       }
-       if (!(val & 0xf000000000000000ull)) {
-               val <<= 4;
-               r -= 4;
-       }
-       if (!(val & 0xc000000000000000ull)) {
-               val <<= 2;
-               r -= 2;
-       }
-       if (!(val & 0x8000000000000000ull)) {
-               val <<= 1;
-               r -= 1;
-       }
-       return r;
-#if HAVE_BUILTIN_CLZL
-       }
-#endif
+       return ilog64(val);
 }
 
 /* In which bucket would we find a particular record size? (ignoring header) */
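
The open-coded fls64() above and ccan's ilog64() share the same contract: 0 maps to 0, and any nonzero value maps to one plus the index of its highest set bit. A standalone sketch of that contract, assuming only that ccan/ilog is on the include path:

	#include <ccan/ilog/ilog.h>
	#include <assert.h>

	int main(void)
	{
		/* ilog64(v) == floor(log2(v)) + 1 for v > 0, and 0 for v == 0,
		 * matching the fls64() this patch deletes. */
		assert(ilog64(0) == 0);
		assert(ilog64(1) == 1);
		assert(ilog64(3) == 2);
		assert(ilog64(1ULL << 63) == 64);
		return 0;
	}

ilog64() dispatches to compiler builtins such as __builtin_clz where they are available, so the HAVE_BUILTIN_CLZL fast path no longer needs to live here.
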
@@ -168,7 +132,8 @@ static tdb_off_t find_free_head(struct tdb_context *tdb, tdb_off_t bucket)
 
 /* Remove from free bucket. */
 static int remove_from_list(struct tdb_context *tdb,
-                           tdb_off_t b_off, struct tdb_free_record *r)
+                           tdb_off_t b_off, tdb_off_t r_off,
+                           struct tdb_free_record *r)
 {
        tdb_off_t off;
 
@@ -178,6 +143,16 @@ static int remove_from_list(struct tdb_context *tdb,
        } else {
                off = r->prev + offsetof(struct tdb_free_record, next);
        }
+
+#ifdef DEBUG
+       if (tdb_read_off(tdb, off) != r_off) {
+               tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+                        "remove_from_list: %llu bad prev in list %llu\n",
+                        (long long)r_off, (long long)b_off);
+               return -1;
+       }
+#endif
+
        /* r->prev->next = r->next */
        if (tdb_write_off(tdb, off, r->next)) {
                return -1;
@@ -186,6 +161,16 @@ static int remove_from_list(struct tdb_context *tdb,
        if (r->next != 0) {
                off = r->next + offsetof(struct tdb_free_record, prev);
                /* r->next->prev = r->prev */
+
+#ifdef DEBUG
+               if (tdb_read_off(tdb, off) != r_off) {
+                       tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+                                "remove_from_list: %llu bad list %llu\n",
+                                (long long)r_off, (long long)b_off);
+                       return -1;
+               }
+#endif
+
                if (tdb_write_off(tdb, off, r->prev)) {
                        return -1;
                }
@@ -206,6 +191,17 @@ static int enqueue_in_free(struct tdb_context *tdb,
                return -1;
 
        if (new->next) {
+#ifdef DEBUG
+               if (tdb_read_off(tdb,
+                                new->next
+                                + offsetof(struct tdb_free_record, prev))
+                   != 0) {
+                       tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+                                "enqueue_in_free: %llu bad head prev %llu\n",
+                                (long long)new->next, (long long)b_off);
+                       return -1;
+               }
+#endif
                /* next->prev = new. */
                if (tdb_write_off(tdb, new->next
                                  + offsetof(struct tdb_free_record, prev),
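
Each of the new #ifdef DEBUG blocks verifies the same invariant of the on-disk doubly-linked free list before writing: the offset about to be overwritten must still point at the record being unlinked (or at 0 for an empty head, in the enqueue case). An in-memory analogue of the unlink path, with illustrative types standing in for tdb2's on-disk records:

	#include <assert.h>
	#include <stddef.h>

	struct node {
		struct node *prev, *next;
	};

	/* Unlink n, asserting what the DEBUG blocks check: the link we
	 * are about to rewrite must currently point back at n. */
	static void unlink_node(struct node **head, struct node *n)
	{
		struct node **prevp = n->prev ? &n->prev->next : head;

		assert(*prevp == n);		/* else "bad prev in list" */
		*prevp = n->next;		/* r->prev->next = r->next */

		if (n->next) {
			assert(n->next->prev == n);	/* else "bad list" */
			n->next->prev = n->prev;	/* r->next->prev = r->prev */
		}
	}

	int main(void)
	{
		struct node a = { NULL, NULL }, b = { &a, NULL };
		struct node *head = &a;

		a.next = &b;
		unlink_node(&head, &b);
		unlink_node(&head, &a);
		assert(head == NULL && a.next == NULL);
		return 0;
	}

Passing r_off into remove_from_list() exists purely to make these checks possible; a release build compiles them away.
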
@@ -244,6 +240,21 @@ int add_free_record(struct tdb_context *tdb,
        return ret;
 }
 
+static size_t adjust_size(size_t keylen, size_t datalen, bool want_extra)
+{
+       size_t size = keylen + datalen;
+
+       /* We want at least 50% growth for data. */
+       if (want_extra)
+               size += datalen/2;
+
+       if (size < TDB_MIN_DATA_LEN)
+               size = TDB_MIN_DATA_LEN;
+
+       /* Round to next uint64_t boundary. */
+       return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
+}
+
 /* If we have enough left over to be useful, split that off. */
 static int to_used_record(struct tdb_context *tdb,
                          unsigned int zone_bits,
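
Worked through once: keylen 5 and datalen 20 with want_extra set gives 25 + 10 = 35 bytes, which rounds up to 40, the next uint64_t boundary. A self-contained re-creation of the helper (MIN_DATA_LEN and the function name are illustrative stand-ins, not tdb2's):

	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	#define MIN_DATA_LEN 8	/* stand-in for TDB_MIN_DATA_LEN */

	static size_t adjust_size_sketch(size_t keylen, size_t datalen,
					 bool want_extra)
	{
		size_t size = keylen + datalen;

		/* Leave ~50% headroom so an enlarging store can grow in place. */
		if (want_extra)
			size += datalen / 2;

		if (size < MIN_DATA_LEN)
			size = MIN_DATA_LEN;

		/* (size + 7) & ~7: round up to the next uint64_t boundary. */
		return (size + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1);
	}

	int main(void)
	{
		assert(adjust_size_sketch(5, 20, true) == 40);	/* 35 -> 40 */
		assert(adjust_size_sketch(1, 1, false) == 8);	/* clamped */
		return 0;
	}

Note that the moved version applies the 50% growth before clamping to the minimum, whereas the deleted copy further down applied it after clamping; for sizes near the minimum the two can differ.
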
@@ -308,7 +319,7 @@ static int coalesce(struct tdb_context *tdb,
                        break;
                }
 
-               if (remove_from_list(tdb, nb_off, r) == -1) {
+               if (remove_from_list(tdb, nb_off, end, r) == -1) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        goto err;
                }
@@ -334,7 +345,7 @@ static int coalesce(struct tdb_context *tdb,
                goto err;
        }
 
-       if (remove_from_list(tdb, b_off, r) == -1)
+       if (remove_from_list(tdb, b_off, off, r) == -1)
                goto err;
 
        /* We have to drop this to avoid deadlocks. */
@@ -424,7 +435,7 @@ again:
        if (best_off) {
        use_best:
                /* We're happy with this size: take it. */
-               if (remove_from_list(tdb, b_off, &best) != 0)
+               if (remove_from_list(tdb, b_off, best_off, &best) != 0)
                        goto unlock_err;
                tdb_unlock_free_bucket(tdb, b_off);
 
@@ -505,7 +516,7 @@ static tdb_off_t get_free(struct tdb_context *tdb, size_t size,
 int set_header(struct tdb_context *tdb,
               struct tdb_used_record *rec,
               uint64_t keylen, uint64_t datalen,
-              uint64_t actuallen, uint64_t hash,
+              uint64_t actuallen, unsigned hashlow,
               unsigned int zone_bits)
 {
        uint64_t keybits = (fls64(keylen) + 1) / 2;
@@ -513,7 +524,7 @@ int set_header(struct tdb_context *tdb,
        /* Use bottom bits of hash, so it's independent of hash table size. */
        rec->magic_and_meta
                = zone_bits
-               | ((hash & ((1 << 5)-1)) << 6)
+               | ((hashlow & ((1 << 5)-1)) << 6)
                | ((actuallen - (keylen + datalen)) << 11)
                | (keybits << 43)
                | (TDB_MAGIC << 48);
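
Spelled out, the packing above gives rec->magic_and_meta this layout: zone_bits in bits 0-5, the low 5 hash bits in bits 6-10, the extra-space count (actuallen - keylen - datalen) in bits 11-42, keybits in bits 43-47, and TDB_MAGIC from bit 48 up. A decoding sketch inferred from that expression (accessor names here are illustrative; tdb2 keeps its own accessors in private.h):

	#include <stdint.h>

	static inline unsigned meta_zone_bits(uint64_t m) { return m & 0x3f; }
	static inline unsigned meta_hash_low(uint64_t m)  { return (m >> 6) & 0x1f; }
	static inline uint64_t meta_extra(uint64_t m)     { return (m >> 11) & 0xffffffffULL; }
	static inline unsigned meta_key_bits(uint64_t m)  { return (m >> 43) & 0x1f; }
	static inline unsigned meta_magic(uint64_t m)     { return (unsigned)(m >> 48); }

Narrowing the parameter from a full uint64_t hash to unsigned hashlow documents at the type level that only those low bits are ever consumed here.
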
@@ -633,21 +644,6 @@ fail:
        return -1;
 }
 
-static tdb_len_t adjust_size(size_t keylen, size_t datalen, bool growing)
-{
-       tdb_len_t size = keylen + datalen;
-
-       if (size < TDB_MIN_DATA_LEN)
-               size = TDB_MIN_DATA_LEN;
-
-       /* Overallocate if this is coming from an enlarging store. */
-       if (growing)
-               size += datalen / 2;
-
-       /* Round to next uint64_t boundary. */
-       return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
-}
-
 /* This won't fail: it will expand the database if it has to. */
 tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
                uint64_t hash, bool growing)