*/
#include "private.h"
#include <ccan/likely/likely.h>
+#include <ccan/ilog/ilog.h>
#include <time.h>
#include <assert.h>
#include <limits.h>
-/* We have to be able to fit a free record here. */
-#define MIN_DATA_LEN \
- (sizeof(struct tdb_free_record) - sizeof(struct tdb_used_record))
-
-/* We have a series of free lists, each one covering a "zone" of the file.
- *
- * For each zone we have a series of per-size buckets, and a final bucket for
- * "too big".
- *
- * It's possible to move the free_list_head, but *only* under the allrecord
- * lock. */
-static tdb_off_t free_list_off(struct tdb_context *tdb, unsigned int list)
-{
- return tdb->header.v.free_off + list * sizeof(tdb_off_t);
-}
-
-/* We're a library: playing with srandom() is unfriendly. srandom_r
- * probably lacks portability. We don't need very random here. */
-static unsigned int quick_random(struct tdb_context *tdb)
-{
- return getpid() + time(NULL) + (unsigned long)tdb;
-}
-
-/* Start by using a random zone to spread the load. */
-void tdb_zone_init(struct tdb_context *tdb)
-{
- /*
- * We read num_zones without a proper lock, so we could have
- * gotten a partial read. Since zone_bits is 1 byte long, we
- * can trust that; even if it's increased, the number of zones
- * cannot have decreased. And using the map size means we
- * will not start with a zone which hasn't been filled yet.
- */
- tdb->last_zone = quick_random(tdb)
- % ((tdb->map_size >> tdb->header.v.zone_bits) + 1);
-}
-
static unsigned fls64(uint64_t val)
{
-#if HAVE_BUILTIN_CLZL
- if (val <= ULONG_MAX) {
- /* This is significantly faster! */
- return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
- } else {
-#endif
- uint64_t r = 64;
-
- if (!val)
- return 0;
- if (!(val & 0xffffffff00000000ull)) {
- val <<= 32;
- r -= 32;
- }
- if (!(val & 0xffff000000000000ull)) {
- val <<= 16;
- r -= 16;
- }
- if (!(val & 0xff00000000000000ull)) {
- val <<= 8;
- r -= 8;
- }
- if (!(val & 0xf000000000000000ull)) {
- val <<= 4;
- r -= 4;
- }
- if (!(val & 0xc000000000000000ull)) {
- val <<= 2;
- r -= 2;
- }
- if (!(val & 0x8000000000000000ull)) {
- val <<= 1;
- r -= 1;
- }
- return r;
-#if HAVE_BUILTIN_CLZL
- }
-#endif
+ return ilog64(val);
}
/* In which bucket would we find a particular record size? (ignoring header) */
-unsigned int size_to_bucket(struct tdb_context *tdb, tdb_len_t data_len)
+unsigned int size_to_bucket(tdb_len_t data_len)
{
unsigned int bucket;
/* We can't have records smaller than this. */
- assert(data_len >= MIN_DATA_LEN);
+ assert(data_len >= TDB_MIN_DATA_LEN);
/* Ignoring the header... */
- if (data_len - MIN_DATA_LEN <= 64) {
- /* 0 in bucket 0, 8 in bucket 1... 64 in bucket 6. */
- bucket = (data_len - MIN_DATA_LEN) / 8;
+ if (data_len - TDB_MIN_DATA_LEN <= 64) {
+ /* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
+ bucket = (data_len - TDB_MIN_DATA_LEN) / 8;
} else {
/* After that we go power of 2. */
- bucket = fls64(data_len - MIN_DATA_LEN) + 2;
+ bucket = fls64(data_len - TDB_MIN_DATA_LEN) + 2;
}
- if (unlikely(bucket > tdb->header.v.free_buckets))
- bucket = tdb->header.v.free_buckets;
+ if (unlikely(bucket >= TDB_FREE_BUCKETS))
+ bucket = TDB_FREE_BUCKETS - 1;
return bucket;
}
-/* What zone does a block belong in? */
-tdb_off_t zone_of(struct tdb_context *tdb, tdb_off_t off)
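+/* Free tables are chained: the first hangs off the header, each links to the next. */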
+tdb_off_t first_ftable(struct tdb_context *tdb)
{
- assert(tdb->header_uptodate);
+ return tdb_read_off(tdb, offsetof(struct tdb_header, free_table));
+}
- return off >> tdb->header.v.zone_bits;
+tdb_off_t next_ftable(struct tdb_context *tdb, tdb_off_t ftable)
+{
+ return tdb_read_off(tdb, ftable + offsetof(struct tdb_freetable,next));
}
-/* Returns free_buckets + 1, or list number to search. */
-static tdb_off_t find_free_head(struct tdb_context *tdb, tdb_off_t bucket)
+int tdb_ftable_init(struct tdb_context *tdb)
{
- tdb_off_t first, off;
+ /* Use reservoir sampling algorithm to select a free list at random. */
+ unsigned int rnd, max = 0, count = 0;
+ tdb_off_t off;
+
+ tdb->ftable_off = off = first_ftable(tdb);
+ tdb->ftable = 0;
+
+ while (off) {
+ if (off == TDB_OFF_ERR)
+ return -1;
+
+ rnd = random();
+ if (rnd >= max) {
+ tdb->ftable_off = off;
+ tdb->ftable = count;
+ max = rnd;
+ }
+
+ off = next_ftable(tdb, off);
+ count++;
+ }
+ return 0;
+}
+/* Offset of a given bucket. */
+tdb_off_t bucket_off(tdb_off_t ftable_off, unsigned bucket)
+{
+ return ftable_off + offsetof(struct tdb_freetable, buckets)
+ + bucket * sizeof(tdb_off_t);
+}
+
+/* Returns TDB_FREE_BUCKETS if none found, or the bucket number to search. */
+static tdb_off_t find_free_head(struct tdb_context *tdb,
+ tdb_off_t ftable_off,
+ tdb_off_t bucket)
+{
/* Speculatively search for a non-zero bucket. */
- first = tdb->last_zone * (tdb->header.v.free_buckets+1) + bucket;
- off = tdb_find_nonzero_off(tdb, free_list_off(tdb, first),
- tdb->header.v.free_buckets + 1 - bucket);
- return bucket + off;
+ return tdb_find_nonzero_off(tdb, bucket_off(ftable_off, 0),
+ bucket, TDB_FREE_BUCKETS);
}
+/* Remove from free bucket. */
static int remove_from_list(struct tdb_context *tdb,
- tdb_off_t list, struct tdb_free_record *r)
+ tdb_off_t b_off, tdb_off_t r_off,
+ const struct tdb_free_record *r)
{
tdb_off_t off;
/* Front of list? */
- if (r->prev == 0) {
- off = free_list_off(tdb, list);
+ if (frec_prev(r) == 0) {
+ off = b_off;
} else {
- off = r->prev + offsetof(struct tdb_free_record, next);
+ off = frec_prev(r) + offsetof(struct tdb_free_record, next);
+ }
+
+#ifdef CCAN_TDB2_DEBUG
+ if (tdb_read_off(tdb, off) != r_off) {
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ "remove_from_list: %llu bad prev in list %llu",
+ (long long)r_off, (long long)b_off);
+ return -1;
}
+#endif
+
/* r->prev->next = r->next */
if (tdb_write_off(tdb, off, r->next)) {
return -1;
}
if (r->next != 0) {
- off = r->next + offsetof(struct tdb_free_record, prev);
+ off = r->next + offsetof(struct tdb_free_record,magic_and_prev);
/* r->next->prev = r->prev */
- if (tdb_write_off(tdb, off, r->prev)) {
+
+#ifdef CCAN_TDB2_DEBUG
+	if ((tdb_read_off(tdb, off) & TDB_OFF_MASK) != r_off) {
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ "remove_from_list: %llu bad list %llu",
+ (long long)r_off, (long long)b_off);
+ return -1;
+ }
+#endif
+
+ if (tdb_write_off(tdb, off, r->magic_and_prev)) {
return -1;
}
}
return 0;
}
-/* Enqueue in this free list. */
+/* Enqueue in this free bucket. */
static int enqueue_in_free(struct tdb_context *tdb,
- tdb_off_t list,
+ tdb_off_t b_off,
tdb_off_t off,
- struct tdb_free_record *new)
+ tdb_len_t len)
{
- new->prev = 0;
+ struct tdb_free_record new;
+ uint64_t magic = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL));
+
+	/* Encode which free table this record belongs to in the top bits of the length word. */
+ new.ftable_and_len = ((uint64_t)tdb->ftable << (64 - TDB_OFF_UPPER_STEAL))
+ | len;
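+	/* Magic goes in the top bits of magic_and_prev; the prev offset uses the rest. */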
+ /* prev = 0. */
+ new.magic_and_prev = magic;
+
/* new->next = head. */
- new->next = tdb_read_off(tdb, free_list_off(tdb, list));
- if (new->next == TDB_OFF_ERR)
+ new.next = tdb_read_off(tdb, b_off);
+ if (new.next == TDB_OFF_ERR)
return -1;
- if (new->next) {
+ if (new.next) {
+#ifdef CCAN_TDB2_DEBUG
+ if (tdb_read_off(tdb,
+ new.next + offsetof(struct tdb_free_record,
+ magic_and_prev))
+ != magic) {
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ "enqueue_in_free: %llu bad head"
+ " prev %llu",
+ (long long)new.next, (long long)b_off);
+ return -1;
+ }
+#endif
/* next->prev = new. */
- if (tdb_write_off(tdb, new->next
- + offsetof(struct tdb_free_record, prev),
- off) != 0)
+ if (tdb_write_off(tdb, new.next
+ + offsetof(struct tdb_free_record,
+ magic_and_prev),
+ off | magic) != 0)
return -1;
}
/* head = new */
- if (tdb_write_off(tdb, free_list_off(tdb, list), off) != 0)
+ if (tdb_write_off(tdb, b_off, off) != 0)
return -1;
-
- return tdb_write_convert(tdb, off, new, sizeof(*new));
+
+ return tdb_write_convert(tdb, off, &new, sizeof(new));
}
-/* List isn't locked. */
+/* List need not be locked. */
int add_free_record(struct tdb_context *tdb,
tdb_off_t off, tdb_len_t len_with_header)
{
- struct tdb_free_record new;
- tdb_off_t list;
+ tdb_off_t b_off;
+ tdb_len_t len;
int ret;
- assert(len_with_header >= sizeof(new));
-
- new.magic = TDB_FREE_MAGIC;
- new.data_len = len_with_header - sizeof(struct tdb_used_record);
+ assert(len_with_header >= sizeof(struct tdb_free_record));
- tdb->last_zone = zone_of(tdb, off);
- list = tdb->last_zone * (tdb->header.v.free_buckets+1)
- + size_to_bucket(tdb, new.data_len);
+ len = len_with_header - sizeof(struct tdb_used_record);
- if (tdb_lock_free_list(tdb, list, TDB_LOCK_WAIT) != 0)
+ b_off = bucket_off(tdb->ftable_off, size_to_bucket(len));
+ if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) != 0)
return -1;
- ret = enqueue_in_free(tdb, list, off, &new);
- tdb_unlock_free_list(tdb, list);
+ ret = enqueue_in_free(tdb, b_off, off, len);
+ tdb_unlock_free_bucket(tdb, b_off);
return ret;
}
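+/* Round a key+data size up to the smallest usable, uint64_t-aligned allocation. */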
+static size_t adjust_size(size_t keylen, size_t datalen)
+{
+ size_t size = keylen + datalen;
+
+ if (size < TDB_MIN_DATA_LEN)
+ size = TDB_MIN_DATA_LEN;
+
+ /* Round to next uint64_t boundary. */
+ return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
+}
+
/* If we have enough left over to be useful, split that off. */
-static int to_used_record(struct tdb_context *tdb,
- tdb_off_t off,
- tdb_len_t needed,
- tdb_len_t total_len,
- tdb_len_t *actual)
+static size_t record_leftover(size_t keylen, size_t datalen,
+ bool want_extra, size_t total_len)
{
- struct tdb_used_record used;
- tdb_len_t leftover;
+ ssize_t leftover;
- leftover = total_len - needed;
- if (leftover < sizeof(struct tdb_free_record))
- leftover = 0;
+ if (want_extra)
+ datalen += datalen / 2;
+ leftover = total_len - adjust_size(keylen, datalen);
- *actual = total_len - leftover;
+ if (leftover < (ssize_t)sizeof(struct tdb_free_record))
+ return 0;
- if (leftover) {
- if (add_free_record(tdb, off + sizeof(used) + *actual,
- total_len - needed))
- return -1;
- }
- return 0;
+ return leftover;
+}
+
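+/* Offset of free table number 'ftable': cached if current, otherwise walk the chain. */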
+static tdb_off_t ftable_offset(struct tdb_context *tdb, unsigned int ftable)
+{
+ tdb_off_t off;
+ unsigned int i;
+
+ if (likely(tdb->ftable == ftable))
+ return tdb->ftable_off;
+
+ off = first_ftable(tdb);
+ for (i = 0; i < ftable; i++)
+ off = next_ftable(tdb, off);
+ return off;
}
-/* Note: we unlock the current list if we coalesce or fail. */
-static int coalesce(struct tdb_context *tdb, tdb_off_t off,
- tdb_off_t list, tdb_len_t data_len)
+/* Note: we unlock the current bucket if we coalesce or fail. */
+static int coalesce(struct tdb_context *tdb,
+ tdb_off_t off, tdb_off_t b_off, tdb_len_t data_len)
{
- struct tdb_free_record pad, *r;
- tdb_off_t end = off + sizeof(struct tdb_used_record) + data_len;
+ tdb_off_t end;
+ struct tdb_free_record rec;
- while (!tdb->methods->oob(tdb, end + sizeof(*r), 1)) {
- tdb_off_t nlist;
+ add_stat(tdb, alloc_coalesce_tried, 1);
+ end = off + sizeof(struct tdb_used_record) + data_len;
- r = tdb_get(tdb, end, &pad, sizeof(pad));
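+	/* Walk forward, absorbing any adjacent records which are still free. */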
+ while (end < tdb->map_size) {
+ const struct tdb_free_record *r;
+ tdb_off_t nb_off;
+ unsigned ftable, bucket;
+
+ r = tdb_access_read(tdb, end, sizeof(*r), true);
if (!r)
goto err;
- if (r->magic != TDB_FREE_MAGIC)
+ if (frec_magic(r) != TDB_FREE_MAGIC
+ || frec_ftable(r) == TDB_FTABLE_NONE) {
+ tdb_access_release(tdb, r);
break;
+ }
- nlist = zone_of(tdb, end) * (tdb->header.v.free_buckets+1)
- + size_to_bucket(tdb, r->data_len);
+ ftable = frec_ftable(r);
+ bucket = size_to_bucket(frec_len(r));
+ nb_off = bucket_off(ftable_offset(tdb, ftable), bucket);
+ tdb_access_release(tdb, r);
/* We may be violating lock order here, so best effort. */
- if (tdb_lock_free_list(tdb, nlist, TDB_LOCK_NOWAIT) == -1)
+ if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT) == -1) {
+ add_stat(tdb, alloc_coalesce_lockfail, 1);
break;
+ }
/* Now we have lock, re-check. */
- r = tdb_get(tdb, end, &pad, sizeof(pad));
- if (!r) {
- tdb_unlock_free_list(tdb, nlist);
+ if (tdb_read_convert(tdb, end, &rec, sizeof(rec))) {
+ tdb_unlock_free_bucket(tdb, nb_off);
goto err;
}
- if (unlikely(r->magic != TDB_FREE_MAGIC)) {
- tdb_unlock_free_list(tdb, nlist);
+ if (unlikely(frec_magic(&rec) != TDB_FREE_MAGIC)) {
+ add_stat(tdb, alloc_coalesce_race, 1);
+ tdb_unlock_free_bucket(tdb, nb_off);
+ break;
+ }
+
+ if (unlikely(frec_ftable(&rec) != ftable)
+ || unlikely(size_to_bucket(frec_len(&rec)) != bucket)) {
+ add_stat(tdb, alloc_coalesce_race, 1);
+ tdb_unlock_free_bucket(tdb, nb_off);
break;
}
- if (remove_from_list(tdb, nlist, r) == -1) {
- tdb_unlock_free_list(tdb, nlist);
+ if (remove_from_list(tdb, nb_off, end, &rec) == -1) {
+ tdb_unlock_free_bucket(tdb, nb_off);
goto err;
}
- end += sizeof(struct tdb_used_record) + r->data_len;
- tdb_unlock_free_list(tdb, nlist);
+ end += sizeof(struct tdb_used_record) + frec_len(&rec);
+ tdb_unlock_free_bucket(tdb, nb_off);
+ add_stat(tdb, alloc_coalesce_num_merged, 1);
}
/* Didn't find any adjacent free? */
if (end == off + sizeof(struct tdb_used_record) + data_len)
return 0;
- /* OK, expand record */
- r = tdb_get(tdb, off, &pad, sizeof(pad));
- if (!r)
+ /* OK, expand initial record */
+ if (tdb_read_convert(tdb, off, &rec, sizeof(rec)))
goto err;
- if (r->data_len != data_len) {
- tdb->ecode = TDB_ERR_CORRUPT;
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "coalesce: expected data len %llu not %llu\n",
- (long long)data_len, (long long)r->data_len);
+ if (frec_len(&rec) != data_len) {
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ "coalesce: expected data len %zu not %zu",
+ (size_t)data_len, (size_t)frec_len(&rec));
goto err;
}
- if (remove_from_list(tdb, list, r) == -1)
+ if (remove_from_list(tdb, b_off, off, &rec) == -1)
goto err;
- /* We have to drop this to avoid deadlocks. */
- tdb_unlock_free_list(tdb, list);
+ /* We have to drop this to avoid deadlocks, so make sure record
+ * doesn't get coalesced by someone else! */
+ rec.ftable_and_len = (TDB_FTABLE_NONE << (64 - TDB_OFF_UPPER_STEAL))
+ | (end - off - sizeof(struct tdb_used_record));
+ if (tdb_write_off(tdb, off + offsetof(struct tdb_free_record,
+ ftable_and_len),
+ rec.ftable_and_len) != 0)
+ goto err;
+
+ add_stat(tdb, alloc_coalesce_succeeded, 1);
+ tdb_unlock_free_bucket(tdb, b_off);
if (add_free_record(tdb, off, end - off) == -1)
return -1;
return 1;
err:
- /* To unify error paths, we *always* unlock list. */
- tdb_unlock_free_list(tdb, list);
+ /* To unify error paths, we *always* unlock bucket on error. */
+ tdb_unlock_free_bucket(tdb, b_off);
return -1;
}
/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
- tdb_off_t bucket, size_t size,
- tdb_len_t *actual)
+ tdb_off_t ftable_off,
+ tdb_off_t bucket,
+ size_t keylen, size_t datalen,
+ bool want_extra,
+ unsigned magic,
+ unsigned hashlow)
{
- tdb_off_t list;
- tdb_off_t off, best_off;
- struct tdb_free_record pad, best = { 0 }, *r;
+	tdb_off_t off, b_off, best_off;
+ struct tdb_free_record best = { 0 };
double multiplier;
+ size_t size = adjust_size(keylen, datalen);
+ add_stat(tdb, allocs, 1);
again:
- list = tdb->last_zone * (tdb->header.v.free_buckets+1) + bucket;
+ b_off = bucket_off(ftable_off, bucket);
- /* Lock this list. */
- if (tdb_lock_free_list(tdb, list, TDB_LOCK_WAIT) == -1) {
+ /* FIXME: Try non-blocking wait first, to measure contention. */
+ /* Lock this bucket. */
+ if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == -1) {
return TDB_OFF_ERR;
}
- best.data_len = -1ULL;
+ best.ftable_and_len = -1ULL;
best_off = 0;
- multiplier = 1.0;
+
+ /* Get slack if we're after extra. */
+ if (want_extra)
+ multiplier = 1.5;
+ else
+ multiplier = 1.0;
/* Walk the list to see if any are large enough, getting less fussy
* as we go. */
- off = tdb_read_off(tdb, free_list_off(tdb, list));
+ off = tdb_read_off(tdb, b_off);
if (unlikely(off == TDB_OFF_ERR))
goto unlock_err;
while (off) {
- r = tdb_get(tdb, off, &pad, sizeof(*r));
+ const struct tdb_free_record *r;
+ tdb_len_t len;
+ tdb_off_t next;
+
+ r = tdb_access_read(tdb, off, sizeof(*r), true);
if (!r)
goto unlock_err;
- if (r->magic != TDB_FREE_MAGIC) {
- tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
- "lock_and_alloc: %llu non-free 0x%llx\n",
- (long long)off, (long long)r->magic);
+ if (frec_magic(r) != TDB_FREE_MAGIC) {
+ tdb_access_release(tdb, r);
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ "lock_and_alloc: %llu non-free 0x%llx",
+ (long long)off, (long long)r->magic_and_prev);
goto unlock_err;
}
- if (r->data_len >= size && r->data_len < best.data_len) {
+ if (frec_len(r) >= size && frec_len(r) < frec_len(&best)) {
best_off = off;
best = *r;
}
- if (best.data_len < size * multiplier && best_off)
- goto use_best;
+ if (frec_len(&best) < size * multiplier && best_off) {
+ tdb_access_release(tdb, r);
+ break;
+ }
multiplier *= 1.01;
+ next = r->next;
+ len = frec_len(r);
+ tdb_access_release(tdb, r);
+
/* Since we're going slow anyway, try coalescing here. */
- switch (coalesce(tdb, off, list, r->data_len)) {
+ switch (coalesce(tdb, off, b_off, len)) {
case -1:
/* This has already unlocked on error. */
			return -1;
		case 1:
			/* This has unlocked list, restart. */
			goto again;
}
- off = r->next;
+ off = next;
}
/* If we found anything at all, use it. */
if (best_off) {
- use_best:
+ struct tdb_used_record rec;
+ size_t leftover;
+
/* We're happy with this size: take it. */
- if (remove_from_list(tdb, list, &best) != 0)
+ if (remove_from_list(tdb, b_off, best_off, &best) != 0)
goto unlock_err;
- tdb_unlock_free_list(tdb, list);
- if (to_used_record(tdb, best_off, size, best.data_len,
- actual)) {
- return -1;
+ leftover = record_leftover(keylen, datalen, want_extra,
+ frec_len(&best));
+
+ assert(keylen + datalen + leftover <= frec_len(&best));
+ /* We need to mark non-free before we drop lock, otherwise
+ * coalesce() could try to merge it! */
+ if (set_header(tdb, &rec, magic, keylen, datalen,
+ frec_len(&best) - leftover, hashlow) != 0)
+ goto unlock_err;
+
+ if (tdb_write_convert(tdb, best_off, &rec, sizeof(rec)) != 0)
+ goto unlock_err;
+
+ /* Bucket of leftover will be <= current bucket, so nested
+ * locking is allowed. */
+ if (leftover) {
+ add_stat(tdb, alloc_leftover, 1);
+ if (add_free_record(tdb,
+ best_off + sizeof(rec)
+ + frec_len(&best) - leftover,
+ leftover))
+ best_off = TDB_OFF_ERR;
}
+ tdb_unlock_free_bucket(tdb, b_off);
+
return best_off;
}
- tdb_unlock_free_list(tdb, list);
+ tdb_unlock_free_bucket(tdb, b_off);
return 0;
unlock_err:
- tdb_unlock_free_list(tdb, list);
+ tdb_unlock_free_bucket(tdb, b_off);
return TDB_OFF_ERR;
}
-/* We want a really big chunk. Look through every zone's oversize bucket */
-static tdb_off_t huge_alloc(struct tdb_context *tdb, size_t size,
- tdb_len_t *actual)
-{
- tdb_off_t i, off;
-
- for (i = 0; i < tdb->header.v.num_zones; i++) {
- /* Try getting one from list. */
- off = lock_and_alloc(tdb, tdb->header.v.free_buckets,
- size, actual);
- if (off == TDB_OFF_ERR)
- return TDB_OFF_ERR;
- if (off != 0)
- return off;
- /* FIXME: Coalesce! */
- }
- return 0;
-}
-
-static tdb_off_t get_free(struct tdb_context *tdb, size_t size,
- tdb_len_t *actual)
+/* Get a free block from current free list, or 0 if none. */
+static tdb_off_t get_free(struct tdb_context *tdb,
+ size_t keylen, size_t datalen, bool want_extra,
+ unsigned magic, unsigned hashlow)
{
- tdb_off_t off, bucket;
- unsigned int num_empty, step = 0;
-
- bucket = size_to_bucket(tdb, size);
-
- /* If we're after something bigger than a single zone, handle
- * specially. */
- if (unlikely(sizeof(struct tdb_used_record) + size
- >= (1ULL << tdb->header.v.zone_bits))) {
- return huge_alloc(tdb, size, actual);
- }
-
- /* Number of zones we search is proportional to the log of them. */
- for (num_empty = 0; num_empty < fls64(tdb->header.v.num_zones);
- num_empty++) {
- tdb_off_t b;
-
+ tdb_off_t off, ftable_off;
+ unsigned start_b, b, ftable;
+ bool wrapped = false;
+
+ /* If they are growing, add 50% to get to higher bucket. */
+ if (want_extra)
+ start_b = size_to_bucket(adjust_size(keylen,
+ datalen + datalen / 2));
+ else
+ start_b = size_to_bucket(adjust_size(keylen, datalen));
+
+ ftable_off = tdb->ftable_off;
+ ftable = tdb->ftable;
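+	/* Search from the current free table, wrapping until we return to it. */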
+ while (!wrapped || ftable_off != tdb->ftable_off) {
/* Start at exact size bucket, and search up... */
- for (b = bucket; b <= tdb->header.v.free_buckets; b++) {
- b = find_free_head(tdb, b);
-
- /* Non-empty list? Try getting block. */
- if (b <= tdb->header.v.free_buckets) {
- /* Try getting one from list. */
- off = lock_and_alloc(tdb, b, size, actual);
- if (off == TDB_OFF_ERR)
- return TDB_OFF_ERR;
- if (off != 0)
- return off;
- /* Didn't work. Try next bucket. */
+ for (b = find_free_head(tdb, ftable_off, start_b);
+ b < TDB_FREE_BUCKETS;
+ b = find_free_head(tdb, ftable_off, b + 1)) {
+ /* Try getting one from list. */
+ off = lock_and_alloc(tdb, ftable_off,
+ b, keylen, datalen, want_extra,
+ magic, hashlow);
+ if (off == TDB_OFF_ERR)
+ return TDB_OFF_ERR;
+ if (off != 0) {
+ if (b == start_b)
+ add_stat(tdb, alloc_bucket_exact, 1);
+ if (b == TDB_FREE_BUCKETS - 1)
+ add_stat(tdb, alloc_bucket_max, 1);
+ /* Worked? Stay using this list. */
+ tdb->ftable_off = ftable_off;
+ tdb->ftable = ftable;
+ return off;
}
+ /* Didn't work. Try next bucket. */
}
- /* Try another zone, at pseudo random. Avoid duplicates by
- using an odd step. */
- if (step == 0)
- step = ((quick_random(tdb)) % 65536) * 2 + 1;
- tdb->last_zone = (tdb->last_zone + step)
- % tdb->header.v.num_zones;
+ /* Hmm, try next table. */
+ ftable_off = next_ftable(tdb, ftable_off);
+ ftable++;
+
+ if (ftable_off == 0) {
+ wrapped = true;
+ ftable_off = first_ftable(tdb);
+ ftable = 0;
+ }
}
+
return 0;
}
int set_header(struct tdb_context *tdb,
struct tdb_used_record *rec,
- uint64_t keylen, uint64_t datalen,
- uint64_t actuallen, uint64_t hash)
+ unsigned magic, uint64_t keylen, uint64_t datalen,
+ uint64_t actuallen, unsigned hashlow)
{
uint64_t keybits = (fls64(keylen) + 1) / 2;
- /* Use top bits of hash, so it's independent of hash table size. */
- rec->magic_and_meta
- = (actuallen - (keylen + datalen))
- | ((hash >> 53) << 32)
+ /* Use bottom bits of hash, so it's independent of hash table size. */
+ rec->magic_and_meta = (hashlow & ((1 << 11)-1))
+ | ((actuallen - (keylen + datalen)) << 11)
| (keybits << 43)
- | (TDB_MAGIC << 48);
+ | ((uint64_t)magic << 48);
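+	/* Layout: hash low bits 0-10, padding 11-42, key-length bits 43-47, magic 48-63. */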
rec->key_and_data_len = (keylen | (datalen << (keybits*2)));
/* Encoding can fail on big values. */
if (rec_key_length(rec) != keylen
|| rec_data_length(rec) != datalen
|| rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
- tdb->ecode = TDB_ERR_IO;
- tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
- "Could not encode k=%llu,d=%llu,a=%llu\n",
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+ "Could not encode k=%llu,d=%llu,a=%llu",
(long long)keylen, (long long)datalen,
(long long)actuallen);
		return -1;
	}
	return 0;
}
-static tdb_len_t adjust_size(size_t keylen, size_t datalen, bool growing)
-{
- tdb_len_t size = keylen + datalen;
-
- if (size < MIN_DATA_LEN)
- size = MIN_DATA_LEN;
-
- /* Overallocate if this is coming from an enlarging store. */
- if (growing)
- size += datalen / 2;
-
- /* Round to next uint64_t boundary. */
- return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
-}
-
-/* If this fails, try tdb_expand. */
-tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
- uint64_t hash, bool growing)
-{
- tdb_off_t off;
- tdb_len_t size, actual;
- struct tdb_used_record rec;
-
- /* We don't want header to change during this! */
- assert(tdb->header_uptodate);
-
- size = adjust_size(keylen, datalen, growing);
-
- off = get_free(tdb, size, &actual);
- if (unlikely(off == TDB_OFF_ERR || off == 0))
- return off;
-
- /* Some supergiant values can't be encoded. */
- if (set_header(tdb, &rec, keylen, datalen, actual, hash) != 0) {
- add_free_record(tdb, off, sizeof(rec) + actual);
- return TDB_OFF_ERR;
- }
-
- if (tdb_write_convert(tdb, off, &rec, sizeof(rec)) != 0)
- return TDB_OFF_ERR;
-
- return off;
-}
-
-static bool larger_buckets_might_help(struct tdb_context *tdb)
-{
- /* If our buckets are already covering 1/8 of a zone, don't
- * bother (note: might become an 1/16 of a zone if we double
- * zone size). */
- tdb_len_t size = (1ULL << tdb->header.v.zone_bits) / 8;
-
- if (size >= MIN_DATA_LEN
- && size_to_bucket(tdb, size) < tdb->header.v.free_buckets) {
- return false;
- }
-
- /* FIXME: Put stats in tdb_context or examine db itself! */
- /* It's fairly cheap to do as we expand database. */
- return true;
-}
-
-static bool zones_happy(struct tdb_context *tdb)
-{
- /* FIXME: look at distribution of zones. */
- return true;
-}
-
-/* Returns how much extra room we get, or TDB_OFF_ERR. */
-static tdb_len_t expand_to_fill_zones(struct tdb_context *tdb)
+/* Expand the database. */
+static int tdb_expand(struct tdb_context *tdb, tdb_len_t size)
{
- tdb_len_t add;
-
- /* We can enlarge zones without enlarging file to match. */
- add = (tdb->header.v.num_zones<<tdb->header.v.zone_bits)
- - tdb->map_size;
- if (add <= sizeof(struct tdb_free_record))
- return 0;
-
- /* Updates tdb->map_size. */
- if (tdb->methods->expand_file(tdb, add) == -1)
- return TDB_OFF_ERR;
- if (add_free_record(tdb, tdb->map_size - add, add) == -1)
- return TDB_OFF_ERR;
- return add;
-}
+ uint64_t old_size;
+ tdb_len_t wanted;
-static int update_zones(struct tdb_context *tdb,
- uint64_t new_num_zones,
- uint64_t new_zone_bits,
- uint64_t new_num_buckets,
- tdb_len_t add)
-{
- tdb_len_t freebucket_size;
- const tdb_off_t *oldf;
- tdb_off_t i, off, old_num_total, old_free_off;
- struct tdb_used_record fhdr;
+ /* We need room for the record header too. */
+ wanted = sizeof(struct tdb_used_record) + size;
- /* Updates tdb->map_size. */
- if (tdb->methods->expand_file(tdb, add) == -1)
+ /* Need to hold a hash lock to expand DB: transactions rely on it. */
+ if (!(tdb->flags & TDB_NOLOCK)
+ && !tdb->allrecord_lock.count && !tdb_has_hash_locks(tdb)) {
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ "tdb_expand: must hold lock during expand");
return -1;
+ }
- /* Use first part as new free bucket array. */
- off = tdb->map_size - add;
- freebucket_size = new_num_zones
- * (new_num_buckets + 1) * sizeof(tdb_off_t);
+ /* always make room for at least 100 more records, and at
+ least 25% more space. */
+ if (size * TDB_EXTENSION_FACTOR > tdb->map_size / 4)
+ wanted = size * TDB_EXTENSION_FACTOR;
+ else
+ wanted = tdb->map_size / 4;
+ wanted = adjust_size(0, wanted);
- /* Write header. */
- if (set_header(tdb, &fhdr, 0, freebucket_size, freebucket_size, 0))
- return -1;
- if (tdb_write_convert(tdb, off, &fhdr, sizeof(fhdr)) == -1)
+ /* Only one person can expand file at a time. */
+ if (tdb_lock_expand(tdb, F_WRLCK) != 0)
return -1;
- /* Adjust off to point to start of buckets, add to be remainder. */
- add -= freebucket_size + sizeof(fhdr);
- off += sizeof(fhdr);
+ /* Someone else may have expanded the file, so retry. */
+ old_size = tdb->map_size;
+ tdb->methods->oob(tdb, tdb->map_size + 1, true);
+ if (tdb->map_size != old_size) {
+ tdb_unlock_expand(tdb, F_WRLCK);
+ return 0;
+ }
- /* Access the old zones. */
- old_num_total = tdb->header.v.num_zones*(tdb->header.v.free_buckets+1);
- old_free_off = tdb->header.v.free_off;
- oldf = tdb_access_read(tdb, old_free_off,
- old_num_total * sizeof(tdb_off_t), true);
- if (!oldf)
+ if (tdb->methods->expand_file(tdb, wanted) == -1) {
+ tdb_unlock_expand(tdb, F_WRLCK);
return -1;
-
- /* Switch to using our new zone. */
- if (zero_out(tdb, off, freebucket_size) == -1)
- goto fail_release;
-
- tdb->header.v.free_off = off;
- tdb->header.v.num_zones = new_num_zones;
- tdb->header.v.zone_bits = new_zone_bits;
- tdb->header.v.free_buckets = new_num_buckets;
-
- /* FIXME: If zone size hasn't changed, can simply copy pointers. */
- /* FIXME: Coalesce? */
- for (i = 0; i < old_num_total; i++) {
- tdb_off_t next;
- struct tdb_free_record rec;
- tdb_off_t list;
-
- for (off = oldf[i]; off; off = next) {
- if (tdb_read_convert(tdb, off, &rec, sizeof(rec)))
- goto fail_release;
-
- list = zone_of(tdb, off)
- * (tdb->header.v.free_buckets+1)
- + size_to_bucket(tdb, rec.data_len);
- next = rec.next;
-
- if (enqueue_in_free(tdb, list, off, &rec) == -1)
- goto fail_release;
- }
}
- /* Free up the old free buckets. */
- old_free_off -= sizeof(fhdr);
- if (tdb_read_convert(tdb, old_free_off, &fhdr, sizeof(fhdr)) == -1)
- goto fail_release;
- if (add_free_record(tdb, old_free_off,
- sizeof(fhdr)
- + rec_data_length(&fhdr)
- + rec_extra_padding(&fhdr)))
- goto fail_release;
-
- /* Add the rest as a new free record. */
- if (add_free_record(tdb, tdb->map_size - add, add) == -1)
- goto fail_release;
-
- /* Start allocating from where the new space is. */
- tdb->last_zone = zone_of(tdb, tdb->map_size - add);
- tdb_access_release(tdb, oldf);
- return write_header(tdb);
-
-fail_release:
- tdb_access_release(tdb, oldf);
- return -1;
+ /* We need to drop this lock before adding free record. */
+ tdb_unlock_expand(tdb, F_WRLCK);
+
+ add_stat(tdb, expands, 1);
+ return add_free_record(tdb, old_size, wanted);
}
-/* Expand the database. */
-int tdb_expand(struct tdb_context *tdb, tdb_len_t klen, tdb_len_t dlen,
- bool growing)
+/* This won't fail: it will expand the database if it has to. */
+tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
+ uint64_t hash, unsigned magic, bool growing)
{
- uint64_t new_num_buckets, new_num_zones, new_zone_bits;
- uint64_t old_num_zones, old_size, old_zone_bits;
- tdb_len_t add, needed;
-
- /* We need room for the record header too. */
- needed = sizeof(struct tdb_used_record)
- + adjust_size(klen, dlen, growing);
-
- /* tdb_allrecord_lock will update header; did zones change? */
- old_zone_bits = tdb->header.v.zone_bits;
- old_num_zones = tdb->header.v.num_zones;
-
- /* FIXME: this is overkill. An expand lock? */
- if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
- return -1;
-
- /* Someone may have expanded for us. */
- if (old_zone_bits != tdb->header.v.zone_bits
- || old_num_zones != tdb->header.v.num_zones)
- goto success;
-
- /* They may have also expanded the underlying size (otherwise we'd
- * have expanded our mmap to look at those offsets already). */
- old_size = tdb->map_size;
- tdb->methods->oob(tdb, tdb->map_size + 1, true);
- if (tdb->map_size != old_size)
- goto success;
-
- add = expand_to_fill_zones(tdb);
- if (add == TDB_OFF_ERR)
- goto fail;
+ tdb_off_t off;
- if (add >= needed) {
- /* Allocate from this zone. */
- tdb->last_zone = zone_of(tdb, tdb->map_size - add);
- goto success;
- }
+ /* We can't hold pointers during this: we could unmap! */
+ assert(!tdb->direct_access);
- /* Slow path. Should we increase the number of buckets? */
- new_num_buckets = tdb->header.v.free_buckets;
- if (larger_buckets_might_help(tdb))
- new_num_buckets++;
-
- /* Now we'll need room for the new free buckets, too. Assume
- * worst case (zones expand). */
- needed += sizeof(struct tdb_used_record)
- + ((tdb->header.v.num_zones+1)
- * (new_num_buckets+1) * sizeof(tdb_off_t));
-
- /* If we need less that one zone, and they're working well, just add
- * another one. */
- if (needed < (1UL<<tdb->header.v.zone_bits) && zones_happy(tdb)) {
- new_num_zones = tdb->header.v.num_zones+1;
- new_zone_bits = tdb->header.v.zone_bits;
- add = 1ULL << tdb->header.v.zone_bits;
- } else {
- /* Increase the zone size. */
- new_num_zones = tdb->header.v.num_zones;
- new_zone_bits = tdb->header.v.zone_bits+1;
- while ((new_num_zones << new_zone_bits)
- < tdb->map_size + needed) {
- new_zone_bits++;
- }
+ for (;;) {
+ off = get_free(tdb, keylen, datalen, growing, magic, hash);
+ if (likely(off != 0))
+ break;
- /* We expand by enough full zones to meet the need. */
- add = ((tdb->map_size + needed + (1ULL << new_zone_bits)-1)
- & ~((1ULL << new_zone_bits)-1))
- - tdb->map_size;
+ if (tdb_expand(tdb, adjust_size(keylen, datalen)))
+ return TDB_OFF_ERR;
}
- if (update_zones(tdb, new_num_zones, new_zone_bits, new_num_buckets,
- add) == -1)
- goto fail;
-
-success:
- tdb_allrecord_unlock(tdb, F_WRLCK);
- return 0;
-
-fail:
- tdb_allrecord_unlock(tdb, F_WRLCK);
- return -1;
+ return off;
}