/* Remove from free bucket. */
static int remove_from_list(struct tdb_context *tdb,
- tdb_off_t b_off, struct tdb_free_record *r)
+ tdb_off_t b_off, tdb_off_t r_off,
+ struct tdb_free_record *r)
{
tdb_off_t off;
} else {
off = r->prev + offsetof(struct tdb_free_record, next);
}
+
+#ifdef DEBUG
+ if (tdb_read_off(tdb, off) != r_off) {
+ tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+ "remove_from_list: %llu bad prev in list %llu\n",
+ (long long)r_off, (long long)b_off);
+ return -1;
+ }
+#endif
+
/* r->prev->next = r->next */
if (tdb_write_off(tdb, off, r->next)) {
return -1;
if (r->next != 0) {
off = r->next + offsetof(struct tdb_free_record, prev);
/* r->next->prev = r->prev */
+
+#ifdef DEBUG
+ if (tdb_read_off(tdb, off) != r_off) {
+ tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+ "remove_from_list: %llu bad list %llu\n",
+ (long long)r_off, (long long)b_off);
+ return -1;
+ }
+#endif
+
if (tdb_write_off(tdb, off, r->prev)) {
return -1;
}
return -1;
if (new->next) {
+#ifdef DEBUG
+ if (tdb_read_off(tdb,
+ new->next
+ + offsetof(struct tdb_free_record, prev))
+ != 0) {
+ tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+ "enqueue_in_free: %llu bad head prev %llu\n",
+ (long long)new->next, (long long)b_off);
+ return -1;
+ }
+#endif
/* next->prev = new. */
if (tdb_write_off(tdb, new->next
+ offsetof(struct tdb_free_record, prev),
return ret;
}
+/* Compute the on-disk space to allocate for a record with the given key
+ * and data lengths.
+ *
+ * With want_extra, 50% extra data space is reserved so that a record
+ * which keeps growing does not need to be reallocated on every store.
+ * Note the extra is added BEFORE the TDB_MIN_DATA_LEN clamp, so small
+ * growing records still get at least the minimum.  The result is rounded
+ * up to the next uint64_t boundary for alignment. */
+static size_t adjust_size(size_t keylen, size_t datalen, bool want_extra)
+{
+	size_t size = keylen + datalen;
+
+	/* We want at least 50% growth for data. */
+	if (want_extra)
+		size += datalen/2;
+
+	if (size < TDB_MIN_DATA_LEN)
+		size = TDB_MIN_DATA_LEN;
+
+	/* Round to next uint64_t boundary. */
+	return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
+}
+
/* If we have enough left over to be useful, split that off. */
static int to_used_record(struct tdb_context *tdb,
unsigned int zone_bits,
break;
}
- if (remove_from_list(tdb, nb_off, r) == -1) {
+ if (remove_from_list(tdb, nb_off, end, r) == -1) {
tdb_unlock_free_bucket(tdb, nb_off);
goto err;
}
goto err;
}
- if (remove_from_list(tdb, b_off, r) == -1)
+ if (remove_from_list(tdb, b_off, off, r) == -1)
goto err;
/* We have to drop this to avoid deadlocks. */
if (best_off) {
use_best:
/* We're happy with this size: take it. */
- if (remove_from_list(tdb, b_off, &best) != 0)
+ if (remove_from_list(tdb, b_off, best_off, &best) != 0)
goto unlock_err;
tdb_unlock_free_bucket(tdb, b_off);
int set_header(struct tdb_context *tdb,
struct tdb_used_record *rec,
uint64_t keylen, uint64_t datalen,
- uint64_t actuallen, uint64_t hash,
+ uint64_t actuallen, unsigned hashlow,
unsigned int zone_bits)
{
uint64_t keybits = (fls64(keylen) + 1) / 2;
/* Use bottom bits of hash, so it's independent of hash table size. */
rec->magic_and_meta
= zone_bits
- | ((hash & ((1 << 5)-1)) << 6)
+ | ((hashlow & ((1 << 5)-1)) << 6)
| ((actuallen - (keylen + datalen)) << 11)
| (keybits << 43)
| (TDB_MAGIC << 48);
return -1;
}
-static tdb_len_t adjust_size(size_t keylen, size_t datalen, bool growing)
-{
- tdb_len_t size = keylen + datalen;
-
- if (size < TDB_MIN_DATA_LEN)
- size = TDB_MIN_DATA_LEN;
-
- /* Overallocate if this is coming from an enlarging store. */
- if (growing)
- size += datalen / 2;
-
- /* Round to next uint64_t boundary. */
- return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
-}
-
/* This won't fail: it will expand the database if it has to. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
uint64_t hash, bool growing)