tdb2: minor optimization for set_header
diff --git a/ccan/tdb2/free.c b/ccan/tdb2/free.c
index e6d871a58607180c9a81afec54ccbdc7a88c0e2a..e6e77bf616666deed3940b07707616485bd29812 100644
@@ -132,7 +132,8 @@ static tdb_off_t find_free_head(struct tdb_context *tdb, tdb_off_t bucket)
 
 /* Remove from free bucket. */
 static int remove_from_list(struct tdb_context *tdb,
-                           tdb_off_t b_off, struct tdb_free_record *r)
+                           tdb_off_t b_off, tdb_off_t r_off,
+                           struct tdb_free_record *r)
 {
        tdb_off_t off;
 
@@ -142,6 +143,16 @@ static int remove_from_list(struct tdb_context *tdb,
        } else {
                off = r->prev + offsetof(struct tdb_free_record, next);
        }
+
+#ifdef DEBUG
+       if (tdb_read_off(tdb, off) != r_off) {
+               tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+                        "remove_from_list: %llu bad prev in list %llu\n",
+                        (long long)r_off, (long long)b_off);
+               return -1;
+       }
+#endif
+
        /* r->prev->next = r->next */
        if (tdb_write_off(tdb, off, r->next)) {
                return -1;
@@ -150,6 +161,16 @@ static int remove_from_list(struct tdb_context *tdb,
        if (r->next != 0) {
                off = r->next + offsetof(struct tdb_free_record, prev);
                /* r->next->prev = r->prev */
+
+#ifdef DEBUG
+               if (tdb_read_off(tdb, off) != r_off) {
+                       tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+                                "remove_from_list: %llu bad list %llu\n",
+                                (long long)r_off, (long long)b_off);
+                       return -1;
+               }
+#endif
+
                if (tdb_write_off(tdb, off, r->prev)) {
                        return -1;
                }
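
The two #ifdef DEBUG blocks added above check the free-list links from both sides before unlinking: the slot about to be overwritten must currently point back at the record being removed, which is why remove_from_list() now needs the record's own offset r_off. A minimal in-memory sketch of the same unlink-with-verification, using plain pointers instead of tdb2's on-disk offsets and tdb_read_off()/tdb_write_off() (struct node and remove_checked() are illustrative names, not tdb2 code):

#include <stdio.h>

/* Illustrative stand-in for struct tdb_free_record. */
struct node {
	struct node *prev;
	struct node *next;
};

/* Unlink n, first verifying that its neighbours really link back to it,
 * as the DEBUG checks above do against the on-disk list. */
static int remove_checked(struct node **head, struct node *n)
{
	/* The previous record (or the bucket head) must point at n. */
	if (n->prev ? n->prev->next != n : *head != n) {
		fprintf(stderr, "bad prev in list\n");
		return -1;
	}
	/* The next record must point back at n. */
	if (n->next && n->next->prev != n) {
		fprintf(stderr, "bad list\n");
		return -1;
	}
	if (n->prev)
		n->prev->next = n->next;
	else
		*head = n->next;
	if (n->next)
		n->next->prev = n->prev;
	return 0;
}

int main(void)
{
	struct node a = { 0 }, b = { 0 };
	struct node *head = &a;

	a.next = &b;
	b.prev = &a;
	return remove_checked(&head, &b);
}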
@@ -170,6 +191,17 @@ static int enqueue_in_free(struct tdb_context *tdb,
                return -1;
 
        if (new->next) {
+#ifdef DEBUG
+               if (tdb_read_off(tdb,
+                                new->next
+                                + offsetof(struct tdb_free_record, prev))
+                   != 0) {
+                       tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+                                "enqueue_in_free: %llu bad head prev %llu\n",
+                                (long long)new->next, (long long)b_off);
+                       return -1;
+               }
+#endif
                /* next->prev = new. */
                if (tdb_write_off(tdb, new->next
                                  + offsetof(struct tdb_free_record, prev),
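
The matching check in enqueue_in_free() runs before the old head's prev slot is overwritten: a record sitting at the head of a bucket must still have prev == 0, otherwise the list was already corrupt. Sketched the same way, again with in-memory pointers standing in for on-disk offsets (enqueue_checked() is an illustrative name only):

#include <stdio.h>

struct node {
	struct node *prev;
	struct node *next;
};

/* Insert n at the head of the bucket list, verifying that the record
 * currently at the head still believes it is the head. */
static int enqueue_checked(struct node **head, struct node *n)
{
	n->prev = NULL;
	n->next = *head;
	if (n->next) {
		if (n->next->prev != NULL) {
			fprintf(stderr, "bad head prev\n");
			return -1;
		}
		/* next->prev = new. */
		n->next->prev = n;
	}
	*head = n;
	return 0;
}

int main(void)
{
	struct node a = { 0 }, b = { 0 };
	struct node *head = NULL;

	return enqueue_checked(&head, &a) || enqueue_checked(&head, &b);
}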
@@ -208,6 +240,21 @@ int add_free_record(struct tdb_context *tdb,
        return ret;
 }
 
+static size_t adjust_size(size_t keylen, size_t datalen, bool want_extra)
+{
+       size_t size = keylen + datalen;
+
+       /* We want at least 50% growth for data. */
+       if (want_extra)
+               size += datalen/2;
+
+       if (size < TDB_MIN_DATA_LEN)
+               size = TDB_MIN_DATA_LEN;
+
+       /* Round to next uint64_t boundary. */
+       return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
+}
+
 /* If we have enough left over to be useful, split that off. */
 static int to_used_record(struct tdb_context *tdb,
                          unsigned int zone_bits,
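
adjust_size() has been moved above its first caller and reworked: the 50% overallocation for growing stores is now added before the TDB_MIN_DATA_LEN clamp rather than after it (the removed copy further down clamped first), and the result is still rounded up to the next uint64_t boundary. A standalone sketch of the arithmetic; the value given to TDB_MIN_DATA_LEN here is a placeholder, not the real constant from the tdb2 headers:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* Placeholder value for illustration only. */
#define TDB_MIN_DATA_LEN 21

/* Same arithmetic as the new adjust_size() above. */
static size_t adjust_size(size_t keylen, size_t datalen, bool want_extra)
{
	size_t size = keylen + datalen;

	/* At least 50% growth for data, applied before the clamp. */
	if (want_extra)
		size += datalen / 2;

	if (size < TDB_MIN_DATA_LEN)
		size = TDB_MIN_DATA_LEN;

	/* Round up to the next multiple of sizeof(uint64_t). */
	return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}

int main(void)
{
	/* keylen 5, datalen 10, growing: 5 + 10 + 5 = 20, rounded up to 24. */
	printf("%zu\n", adjust_size(5, 10, true));
	/* Tiny record: clamped to 21 (this sketch's placeholder), rounded to 24. */
	printf("%zu\n", adjust_size(1, 1, false));
	return 0;
}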
@@ -272,7 +319,7 @@ static int coalesce(struct tdb_context *tdb,
                        break;
                }
 
-               if (remove_from_list(tdb, nb_off, r) == -1) {
+               if (remove_from_list(tdb, nb_off, end, r) == -1) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        goto err;
                }
@@ -298,7 +345,7 @@ static int coalesce(struct tdb_context *tdb,
                goto err;
        }
 
-       if (remove_from_list(tdb, b_off, r) == -1)
+       if (remove_from_list(tdb, b_off, off, r) == -1)
                goto err;
 
        /* We have to drop this to avoid deadlocks. */
@@ -388,7 +435,7 @@ again:
        if (best_off) {
        use_best:
                /* We're happy with this size: take it. */
-               if (remove_from_list(tdb, b_off, &best) != 0)
+               if (remove_from_list(tdb, b_off, best_off, &best) != 0)
                        goto unlock_err;
                tdb_unlock_free_bucket(tdb, b_off);
 
@@ -469,7 +516,7 @@ static tdb_off_t get_free(struct tdb_context *tdb, size_t size,
 int set_header(struct tdb_context *tdb,
               struct tdb_used_record *rec,
               uint64_t keylen, uint64_t datalen,
-              uint64_t actuallen, uint64_t hash,
+              uint64_t actuallen, unsigned hashlow,
               unsigned int zone_bits)
 {
        uint64_t keybits = (fls64(keylen) + 1) / 2;
@@ -477,7 +524,7 @@ int set_header(struct tdb_context *tdb,
        /* Use bottom bits of hash, so it's independent of hash table size. */
        rec->magic_and_meta
                = zone_bits
-               | ((hash & ((1 << 5)-1)) << 6)
+               | ((hashlow & ((1 << 5)-1)) << 6)
                | ((actuallen - (keylen + datalen)) << 11)
                | (keybits << 43)
                | (TDB_MAGIC << 48);
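
Renaming the parameter from hash to hashlow makes it explicit that set_header() only ever consumes the low five bits of the hash (masked with (1 << 5) - 1), so narrowing the type to unsigned loses nothing. The word it builds packs the zone bits, those hash bits, the wasted-space count, the key-length bits and the magic into fixed fields. A sketch of the same packing on a plain uint64_t; the magic value and the fls64 helper below are stand-ins, not the real tdb2 definitions:

#include <stdio.h>
#include <stdint.h>

/* Placeholder 16-bit magic; the real TDB_MAGIC lives in the tdb2 headers. */
#define SKETCH_MAGIC 0xC0DEULL

/* Simple stand-in for fls64(): position of the highest set bit, 1-based. */
static unsigned fls64_sketch(uint64_t v)
{
	unsigned r = 0;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

/* Mirror of the magic_and_meta packing in set_header() above. */
static uint64_t pack_header(uint64_t keylen, uint64_t datalen,
			    uint64_t actuallen, unsigned hashlow,
			    unsigned zone_bits)
{
	uint64_t keybits = (fls64_sketch(keylen) + 1) / 2;

	return zone_bits                                       /* bits 0-5 */
		| ((uint64_t)(hashlow & ((1 << 5) - 1)) << 6)  /* bits 6-10 */
		| ((actuallen - (keylen + datalen)) << 11)     /* bits 11-42 */
		| (keybits << 43)                              /* bits 43-47 */
		| (SKETCH_MAGIC << 48);                        /* bits 48-63 */
}

int main(void)
{
	uint64_t word = pack_header(10, 100, 128, 0x2a, 12);

	/* Only the low 5 bits of the hash survive: 0x2a & 0x1f == 0xa. */
	printf("hash bits: 0x%llx\n", (unsigned long long)((word >> 6) & 0x1f));
	return 0;
}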
@@ -597,21 +644,6 @@ fail:
        return -1;
 }
 
-static tdb_len_t adjust_size(size_t keylen, size_t datalen, bool growing)
-{
-       tdb_len_t size = keylen + datalen;
-
-       if (size < TDB_MIN_DATA_LEN)
-               size = TDB_MIN_DATA_LEN;
-
-       /* Overallocate if this is coming from an enlarging store. */
-       if (growing)
-               size += datalen / 2;
-
-       /* Round to next uint64_t boundary. */
-       return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
-}
-
 /* This won't fail: it will expand the database if it has to. */
 tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
                uint64_t hash, bool growing)