We don't actually need it.
unsigned int *max_zone_bits)
{
struct free_zone_header zhdr;
- tdb_off_t off, hdrlen;
+ tdb_off_t off, hdrlen, end;
tdb_len_t len;
if (tdb_read_convert(tdb, zone_off, &zhdr, sizeof(zhdr)) == -1)
return TDB_OFF_ERR;
}
- /* Zone must be within file! */
- if (tdb->methods->oob(tdb, zone_off + (1ULL << zhdr.zone_bits), false))
- return TDB_OFF_ERR;
-
+ /* Zone header must be within file! */
hdrlen = sizeof(zhdr)
+ (BUCKETS_FOR_ZONE(zhdr.zone_bits) + 1) * sizeof(tdb_off_t);
- for (off = zone_off + hdrlen;
- off < zone_off + (1ULL << zhdr.zone_bits);
- off += len) {
+
+ if (tdb->methods->oob(tdb, zone_off + hdrlen, true))
+ return TDB_OFF_ERR;
+
+ end = zone_off + (1ULL << zhdr.zone_bits);
+ if (end > tdb->map_size)
+ end = tdb->map_size;
+
+ for (off = zone_off + hdrlen; off < end; off += len) {
union {
struct tdb_used_record u;
struct tdb_free_record f;
}
}
}
- return 1ULL << zhdr.zone_bits;
+ return off - zone_off;
}
/* FIXME: call check() function. */
tdb_len_t len;
size_t num_free = 0, num_used = 0, num_found = 0;
unsigned max_zone_bits = INITIAL_ZONE_BITS;
- uint8_t tailer;
if (tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false) != 0)
return -1;
/* First we do a linear scan, checking all records. */
for (off = sizeof(struct tdb_header);
- off < tdb->map_size - 1;
+ off < tdb->map_size;
off += len) {
len = check_zone(tdb, off, &used, &num_used, &free, &num_free,
&max_zone_bits);
goto fail;
}
- /* Check tailer. */
- if (tdb->methods->read(tdb, tdb->map_size - 1, &tailer, 1) == -1)
- goto fail;
- if (tailer != max_zone_bits) {
- tdb->ecode = TDB_ERR_CORRUPT;
- tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
- "tdb_check: Bad tailer value %u vs %u\n", tailer,
- max_zone_bits);
- goto fail;
- }
-
/* FIXME: Check key uniqueness? */
if (!check_hash(tdb, used, num_used))
goto fail;
return ilog64(val);
}
+/* ffs64: find first (least significant) set bit.
+ * Returns one plus the index of the least significant 1-bit of val, or
+ * 0 if val is zero -- the same contract as ffs(3) and __builtin_ffsll,
+ * so both branches below must agree.  Callers rely on this: see
+ * "zone_bits = ffs64(...) - 1" in tdb_expand. */
+static unsigned ffs64(uint64_t val)
+{
+#if HAVE_BUILTIN_FFSLL
+	return __builtin_ffsll(val);
+#else
+	unsigned r = 0;
+
+	if (!val)
+		return 0;
+
+	/* Binary chop: skip over trailing zero bits in halving widths. */
+	if (!(val & 0xffffffff)) {
+		val >>= 32;
+		r += 32;
+	}
+	if (!(val & 0xffff)) {
+		val >>= 16;
+		r += 16;
+	}
+	if (!(val & 0xff)) {
+		val >>= 8;
+		r += 8;
+	}
+	if (!(val & 0xf)) {
+		val >>= 4;
+		r += 4;
+	}
+	if (!(val & 0x3)) {
+		val >>= 2;
+		r += 2;
+	}
+	if (!(val & 0x1)) {
+		val >>= 1;
+		r += 1;
+	}
+	/* r is the 0-based index of the lowest set bit; ffs() semantics
+	 * are 1-based.  Returning bare r here (as before) disagreed with
+	 * the __builtin_ffsll branch by one. */
+	return r + 1;
+#endif
+}
+
/* In which bucket would we find a particular record size? (ignoring header) */
unsigned int size_to_bucket(unsigned int zone_bits, tdb_len_t data_len)
{
return bucket;
}
-/* Subtract 1-byte tailer and header. Then round up to next power of 2. */
-static unsigned max_zone_bits(struct tdb_context *tdb)
+/* Binary search for the zone containing offset "off": fills *zhdr with
+ * that zone's header and returns the zone's start offset, or
+ * TDB_OFF_ERR if reading a zone header fails. */
+static tdb_off_t off_to_zone(struct tdb_context *tdb, tdb_off_t off,
+ struct free_zone_header *zhdr)
{
- return fls64(tdb->map_size-1-sizeof(struct tdb_header)-1) + 1;
-}
+ tdb_off_t start, end;
-/* Start by using a random zone to spread the load: returns the offset. */
-static uint64_t random_zone(struct tdb_context *tdb)
-{
- struct free_zone_header zhdr;
- tdb_off_t off = sizeof(struct tdb_header);
- tdb_len_t half_bits;
- uint64_t randbits = 0;
- unsigned int i;
+ /* Zones start immediately after the fixed header. */
+ start = sizeof(struct tdb_header);
+ end = start + (1ULL << fls64(tdb->map_size - start));
- for (i = 0; i < 64; i += fls64(RAND_MAX))
- randbits ^= ((uint64_t)random()) << i;
-
- /* FIXME: Does this work? Test! */
- half_bits = max_zone_bits(tdb) - 1;
- do {
- /* Pick left or right side (not outside file) */
- if ((randbits & 1)
- && !tdb->methods->oob(tdb, off + (1ULL << half_bits)
- + sizeof(zhdr), true)) {
- off += 1ULL << half_bits;
- }
- randbits >>= 1;
-
- if (tdb_read_convert(tdb, off, &zhdr, sizeof(zhdr)) == -1)
+ /* NOTE(review): the bisection assumes zones are power-of-two sized
+ * and aligned within [start, end), so every midpoint lands on a
+ * zone boundary, and that off lies within some zone (otherwise
+ * this loop would not terminate) -- confirm both invariants. */
+ for (;;) {
+ if (tdb_read_convert(tdb, start, zhdr, sizeof(*zhdr)) == -1)
return TDB_OFF_ERR;
- if (zhdr.zone_bits == half_bits)
- return off;
+ /* Is it inside this zone? */
+ if (off < start + (1ULL << zhdr->zone_bits))
+ return start;
- half_bits--;
- } while (half_bits >= INITIAL_ZONE_BITS);
+ /* In practice, start + end won't overflow. */
+ if (off >= (start + end) / 2)
+ start = (start + end) / 2;
+ else
+ end = (start + end) / 2;
+ }
+}
- tdb->ecode = TDB_ERR_CORRUPT;
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "random_zone: zone at %llu smaller than %u bits?",
- (long long)off, INITIAL_ZONE_BITS);
- return TDB_OFF_ERR;
+/* Find the zone containing the file's last byte; fills *zhdr with its
+ * header and returns its start offset (TDB_OFF_ERR on read failure,
+ * via off_to_zone). */
+static tdb_off_t last_zone(struct tdb_context *tdb,
+ struct free_zone_header *zhdr)
+{
+ return off_to_zone(tdb, tdb->map_size - 1, zhdr);
+}
int tdb_zone_init(struct tdb_context *tdb)
{
- tdb->zone_off = random_zone(tdb);
+ unsigned int i;
+ uint64_t randoff = 0;
+
+ /* We start in a random zone, to spread the load. */
+ /* random() yields fewer than 64 bits per call; accumulate a full
+ * 64-bit value by xoring shifted calls together. */
+ for (i = 0; i < 64; i += fls64(RAND_MAX))
+ randoff ^= ((uint64_t)random()) << i;
+ randoff = sizeof(struct tdb_header)
+ + (randoff % (tdb->map_size - sizeof(struct tdb_header)));
+ /* NOTE(review): the modulo introduces a slight bias toward lower
+ * offsets; presumably acceptable for load-spreading. */
+
+ /* off_to_zone also fills tdb->zhdr, so no separate header read
+ * is needed here any more. */
+ tdb->zone_off = off_to_zone(tdb, randoff, &tdb->zhdr);
if (tdb->zone_off == TDB_OFF_ERR)
return -1;
- if (tdb_read_convert(tdb, tdb->zone_off,
- &tdb->zhdr, sizeof(tdb->zhdr)) == -1)
- return -1;
return 0;
}
int ret;
assert(len_with_header >= sizeof(new));
- assert(zone_bits < (1 << 6));
+ assert(zone_bits < 64);
new.magic_and_meta = TDB_FREE_MAGIC | zone_bits;
new.data_len = len_with_header - sizeof(struct tdb_used_record);
tdb_off_t off, tdb_off_t b_off, tdb_len_t data_len)
{
struct tdb_free_record pad, *r;
- tdb_off_t end = off + sizeof(struct tdb_used_record) + data_len;
+ tdb_off_t zone_end, end;
+
+ end = off + sizeof(struct tdb_used_record) + data_len;
+ zone_end = zone_off + (1ULL << zone_bits);
- while (end < (zone_off + (1ULL << zone_bits))) {
+ if (tdb->methods->oob(tdb, zone_end, true))
+ zone_end = tdb->map_size;
+
+ while (end < zone_end) {
tdb_off_t nb_off;
/* FIXME: do tdb_get here and below really win? */
return 0;
}
-static bool zones_happy(struct tdb_context *tdb)
+static bool zones_contended(struct tdb_context *tdb)
{
- /* FIXME: look at distribution of zones. */
- return true;
+ return false;
}
/* Assume we want buckets up to the comfort factor. */
{
uint64_t old_size;
tdb_off_t off;
- uint8_t zone_bits;
- unsigned int num_buckets;
- tdb_len_t wanted;
+ unsigned int num_buckets, zone_bits;
+ tdb_len_t wanted, expand;
struct free_zone_header zhdr;
- bool enlarge_zone;
/* We need room for the record header too. */
wanted = sizeof(struct tdb_used_record) + size;
if (tdb->map_size != old_size)
goto success;
- /* FIXME: Tailer is a bogus optimization, remove it. */
- /* zone bits tailer char is protected by EXPAND lock. */
- if (tdb->methods->read(tdb, old_size - 1, &zone_bits, 1) == -1)
+ /* Treat last zone as minimum reasonable zone size. */
+ off = last_zone(tdb, &zhdr);
+ if (off == TDB_OFF_ERR)
goto fail;
- /* If zones aren't working well, add larger zone if possible. */
- enlarge_zone = !zones_happy(tdb);
+ /* Zone isn't fully expanded? */
+ if (tdb->map_size < off + (1ULL << zhdr.zone_bits)) {
+ expand = off + (1ULL << zhdr.zone_bits) - tdb->map_size;
+ /* Expand more than we want. */
+ if (expand > (wanted << TDB_COMFORT_FACTOR_BITS))
+ expand = (wanted << TDB_COMFORT_FACTOR_BITS);
+ if (tdb->methods->expand_file(tdb, expand) == -1)
+ goto fail;
+ /* We need to drop this lock before adding free record. */
+ tdb_unlock_expand(tdb, F_WRLCK);
+
+ /* Allocate from here. */
+ tdb->zone_off = off;
+ tdb->zhdr = zhdr;
+
+ /* FIXME: If this isn't sufficient, we search again... */
+ return add_free_record(tdb, zhdr.zone_bits,
+ tdb->map_size - expand, expand);
+ }
- /* New zone can be between zone_bits or larger if we're on the right
- * boundary. */
- for (;;) {
- /* Does this fit the allocation comfortably? */
- if ((1ULL << zone_bits) >= overhead(zone_bits) + wanted) {
- /* Only let enlarge_zone enlarge us once. */
- if (!enlarge_zone)
- break;
- enlarge_zone = false;
- }
- if ((old_size - 1 - sizeof(struct tdb_header))
- & (1 << zone_bits))
- break;
- zone_bits++;
+ /* We are never allowed to cross a power-of-two boundary, and our
+ * minimum zone size is 1 << INITIAL_ZONE_BITS.
+ *
+ * If our filesize is 128k, we can add a 64k or a 128k zone. If it's
+ * 192k, we can only add a 64k zone.
+ *
+ * In other words, our max zone size is (1 << (ffs(filesize) - 1)) */
+ zone_bits = ffs64(old_size - sizeof(struct tdb_header)) - 1;
+ assert(zone_bits >= INITIAL_ZONE_BITS);
+
+ /* Big zones generally good, but more zones wanted if contended. */
+ if (zones_contended(tdb)) {
+ /* If it suffices, make zone same size as last one. */
+ if (zhdr.zone_bits < zone_bits
+ && (1ULL << zhdr.zone_bits) >= overhead(zone_bits)+wanted)
+ zone_bits = zhdr.zone_bits;
}
zhdr.zone_bits = zone_bits;
num_buckets = BUCKETS_FOR_ZONE(zone_bits);
- /* FIXME: I don't think we need to expand to full zone, do we? */
- if (tdb->methods->expand_file(tdb, 1ULL << zone_bits) == -1)
- goto fail;
+ /* Expand the file by more than we need right now. */
+ expand = 1ULL << zone_bits;
+ if (expand > overhead(zone_bits) + (wanted << TDB_COMFORT_FACTOR_BITS))
+ expand = overhead(zone_bits)
+ + (wanted << TDB_COMFORT_FACTOR_BITS);
- /* Write new tailer. */
- if (tdb->methods->write(tdb, tdb->map_size - 1, &zone_bits, 1) == -1)
+ if (tdb->methods->expand_file(tdb, expand) == -1)
goto fail;
- /* Write new zone header (just before old tailer). */
- off = old_size - 1;
+ /* Write new zone header (at old end). */
+ off = old_size;
if (tdb_write_convert(tdb, off, &zhdr, sizeof(zhdr)) == -1)
goto fail;
off += (num_buckets+1) * sizeof(tdb_off_t);
/* Now add the rest as our free record. */
- if (add_free_record(tdb, zone_bits, off, tdb->map_size-1-off) == -1)
+ if (add_free_record(tdb, zone_bits, off, expand - overhead(zone_bits))
+ == -1)
goto fail;
/* Try allocating from this zone now. */
- tdb->zone_off = old_size - 1;
+ tdb->zone_off = old_size;
tdb->zhdr = zhdr;
success:
return 0;
}
-static void *tdb_direct(struct tdb_context *tdb, tdb_off_t off, size_t len)
-{
- if (unlikely(!tdb->map_ptr))
- return NULL;
-
- /* FIXME: We can do a subset of this! */
- if (tdb->transaction)
- return NULL;
-
- if (unlikely(tdb_oob(tdb, off + len, true) == -1))
- return NULL;
- return (char *)tdb->map_ptr + off;
-}
-
/* Either make a copy into pad and return that, or return ptr into mmap. */
/* Note: pad has to be a real object, so we can't get here if len
* overflows size_t */
void *tdb_get(struct tdb_context *tdb, tdb_off_t off, void *pad, size_t len)
{
if (likely(!(tdb->flags & TDB_CONVERT))) {
- void *ret = tdb_direct(tdb, off, len);
+ void *ret = tdb->methods->direct(tdb, off, len);
if (ret)
return ret;
}
int zero_out(struct tdb_context *tdb, tdb_off_t off, tdb_len_t len)
{
char buf[8192] = { 0 };
- void *p = tdb_direct(tdb, off, len);
+ void *p = tdb->methods->direct(tdb, off, len);
if (p) {
memset(p, 0, len);
return 0;
const void *ret = NULL;
if (likely(!(tdb->flags & TDB_CONVERT)))
- ret = tdb_direct(tdb, off, len);
+ ret = tdb->methods->direct(tdb, off, len);
if (!ret) {
struct tdb_access_hdr *hdr;
void *ret = NULL;
if (likely(!(tdb->flags & TDB_CONVERT)))
- ret = tdb_direct(tdb, off, len);
+ ret = tdb->methods->direct(tdb, off, len);
if (!ret) {
struct tdb_access_hdr *hdr;
}
#endif
+/* Give direct access to bytes [off, off+len) in the mmap, or return
+ * NULL when the file isn't mapped or the range is out of bounds;
+ * callers fall back to a read/copy path when this returns NULL. */
+static void *tdb_direct(struct tdb_context *tdb, tdb_off_t off, size_t len)
+{
+ if (unlikely(!tdb->map_ptr))
+ return NULL;
+
+ if (unlikely(tdb_oob(tdb, off + len, true) == -1))
+ return NULL;
+ return (char *)tdb->map_ptr + off;
+}
+
static const struct tdb_methods io_methods = {
tdb_read,
tdb_write,
tdb_oob,
tdb_expand_file,
+ tdb_direct,
};
/*
int (*write)(struct tdb_context *, tdb_off_t, const void *, tdb_len_t);
int (*oob)(struct tdb_context *, tdb_off_t, bool);
int (*expand_file)(struct tdb_context *, tdb_len_t);
+ void *(*direct)(struct tdb_context *, tdb_off_t, size_t);
};
/*
unsigned int *num_buckets)
{
struct free_zone_header zhdr;
- tdb_off_t off;
+ tdb_off_t off, end;
tdb_len_t len;
unsigned int hdrlen;
tdb_len_t unc = 0;
hdrlen = sizeof(zhdr)
+ (BUCKETS_FOR_ZONE(zhdr.zone_bits) + 1) * sizeof(tdb_off_t);
- for (off = zone_off + hdrlen;
- off < zone_off + (1ULL << zhdr.zone_bits);
- off += len) {
+
+ end = zone_off + (1ULL << zhdr.zone_bits);
+ if (end > tdb->map_size)
+ end = tdb->map_size;
+
+ for (off = zone_off + hdrlen; off < end; off += len) {
union {
struct tdb_used_record u;
struct tdb_free_record f;
/* Initial free zone. */
struct free_zone_header zhdr;
tdb_off_t free[BUCKETS_FOR_ZONE(INITIAL_ZONE_BITS) + 1];
- struct tdb_free_record frec;
- /* Rest up to 1 << INITIAL_ZONE_BITS is empty. */
- char space[(1 << INITIAL_ZONE_BITS)
- - sizeof(struct free_zone_header)
- - sizeof(tdb_off_t) * (BUCKETS_FOR_ZONE(INITIAL_ZONE_BITS)+1)
- - sizeof(struct tdb_free_record)];
- uint8_t tailer;
- /* Don't count final padding! */
};
/* initialise a new database */
{
/* We make it up in memory, then write it out if not internal */
struct new_database newdb;
- unsigned int bucket, magic_len, dbsize;
-
- /* Don't want any extra padding! */
- dbsize = offsetof(struct new_database, tailer) + sizeof(newdb.tailer);
+ unsigned int magic_len;
/* Fill in the header */
newdb.hdr.version = TDB_VERSION;
/* Initial hashes are empty. */
memset(newdb.hdr.hashtable, 0, sizeof(newdb.hdr.hashtable));
- /* Free is mostly empty... */
+ /* Free is empty. */
newdb.zhdr.zone_bits = INITIAL_ZONE_BITS;
memset(newdb.free, 0, sizeof(newdb.free));
- /* Create the single free entry. */
- newdb.frec.magic_and_meta = TDB_FREE_MAGIC | INITIAL_ZONE_BITS;
- newdb.frec.data_len = (sizeof(newdb.frec)
- - sizeof(struct tdb_used_record)
- + sizeof(newdb.space));
-
- /* Add it to the correct bucket. */
- bucket = size_to_bucket(INITIAL_ZONE_BITS, newdb.frec.data_len);
- newdb.free[bucket] = offsetof(struct new_database, frec);
- newdb.frec.next = newdb.frec.prev = 0;
-
- /* Clear free space to keep valgrind happy, and avoid leaking stack. */
- memset(newdb.space, 0, sizeof(newdb.space));
-
- /* Tailer contains maximum number of free_zone bits. */
- newdb.tailer = INITIAL_ZONE_BITS;
-
/* Magic food */
memset(newdb.hdr.magic_food, 0, sizeof(newdb.hdr.magic_food));
strcpy(newdb.hdr.magic_food, TDB_MAGIC_FOOD);
/* This creates an endian-converted database, as if read from disk */
magic_len = sizeof(newdb.hdr.magic_food);
tdb_convert(tdb,
- (char *)&newdb.hdr + magic_len,
- offsetof(struct new_database, space) - magic_len);
+ (char *)&newdb.hdr + magic_len, sizeof(newdb) - magic_len);
*hdr = newdb.hdr;
if (tdb->flags & TDB_INTERNAL) {
- tdb->map_size = dbsize;
+ tdb->map_size = sizeof(newdb);
tdb->map_ptr = malloc(tdb->map_size);
if (!tdb->map_ptr) {
tdb->ecode = TDB_ERR_OOM;
if (ftruncate(tdb->fd, 0) == -1)
return -1;
- if (!tdb_pwrite_all(tdb->fd, &newdb, dbsize, 0)) {
+ if (!tdb_pwrite_all(tdb->fd, &newdb, sizeof(newdb), 0)) {
tdb->ecode = TDB_ERR_IO;
return -1;
}
zone_left -= len;
}
- /* Fill final zone with free record. */
- if (zone_left != 0) {
- tdb_layout_add_free(layout,
- zone_left
- - sizeof(struct tdb_used_record));
- layout->elem[layout->num_elems-1].base.off = off;
- off += zone_left;
- }
-
- mem = malloc(off+1);
+ mem = malloc(off);
/* Now populate our header, cribbing from a real TDB header. */
tdb = tdb_open(NULL, TDB_INTERNAL, O_RDWR, 0, &tap_log_attr);
memcpy(mem, tdb->map_ptr, sizeof(struct tdb_header));
/* Mug the tdb we have to make it use this. */
free(tdb->map_ptr);
tdb->map_ptr = mem;
- tdb->map_size = off+1;
+ tdb->map_size = off;
for (i = 0; i < layout->num_elems; i++) {
union tdb_layout_elem *e = &layout->elem[i];
}
}
- /* Write tailer. */
- ((uint8_t *)tdb->map_ptr)[tdb->map_size-1] = last_zone->zone_bits;
-
/* Get physical if they asked for it. */
if (layout->filename) {
int fd = open(layout->filename, O_WRONLY|O_TRUNC|O_CREAT,
tdb = tdb_open(layout->filename, TDB_NOMMAP, O_RDWR, 0,
&tap_log_attr);
}
+
return tdb;
}
--- /dev/null
+#include <ccan/tdb2/tdb.c>
+#include <ccan/tdb2/free.c>
+#include <ccan/tdb2/lock.c>
+#include <ccan/tdb2/io.c>
+#include <ccan/tdb2/hash.c>
+#include <ccan/tdb2/check.c>
+#include <ccan/tap/tap.h>
+#include "logging.h"
+#include "layout.h"
+
+/* Calculate the start offset of the zone containing off, straight from
+ * the layout description -- an independent cross-check against
+ * off_to_zone.  Aborts if off lies beyond every zone. */
+static tdb_off_t layout_zone_off(tdb_off_t off, struct tdb_layout *layout)
+{
+ unsigned int i;
+
+ /* Zones appear in file order; the first zone whose extent reaches
+ * past off must be the one containing it. */
+ for (i = 0; i < layout->num_elems; i++) {
+ if (layout->elem[i].base.type != ZONE)
+ continue;
+ if (layout->elem[i].base.off
+ + (1ULL << layout->elem[i].zone.zone_bits) > off)
+ return layout->elem[i].base.off;
+ }
+ abort();
+}
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ struct tdb_layout *layout;
+ struct free_zone_header zhdr;
+ tdb_off_t off, step;
+ unsigned int i;
+
+ /* FIXME: Test TDB_CONVERT */
+
+ plan_tests(3 + 100);
+
+ /* Build a file with a mix of zone sizes to exercise the lookup. */
+ layout = new_tdb_layout(NULL);
+ tdb_layout_add_zone(layout, INITIAL_ZONE_BITS, false);
+ tdb_layout_add_zone(layout, INITIAL_ZONE_BITS, true);
+ tdb_layout_add_zone(layout, INITIAL_ZONE_BITS+1, true);
+ tdb_layout_add_zone(layout, INITIAL_ZONE_BITS+2, true);
+ tdb_layout_add_zone(layout, INITIAL_ZONE_BITS+2, true);
+ tdb = tdb_layout_get(layout);
+
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+
+ /* Last zone should get right zone. */
+ ok1(last_zone(tdb, &zhdr)
+ == layout->elem[layout->num_elems-1].base.off);
+ ok1(zhdr.zone_bits == INITIAL_ZONE_BITS+2);
+
+ /* Probe 100 evenly-spaced offsets across the file; each must map
+ * to the same zone the layout says contains it. */
+ off = sizeof(struct tdb_header);
+ step = (tdb->map_size - 1 - off) / 100;
+ for (i = 0; i < 100; i++, off += step) {
+ ok1(off_to_zone(tdb, off, &zhdr) == layout_zone_off(off, layout));
+ }
+
+ return exit_status();
+}
if (!tdb)
continue;
- /* First expand. Should add a zone, doubling file size.. */
- val = tdb->map_size - 1 - sizeof(struct tdb_header);
+ /* First expand. Should not fill zone. */
+ val = tdb->map_size - sizeof(struct tdb_header);
ok1(tdb_expand(tdb, 1) == 0);
- ok1(tdb->map_size == 2 * val + 1 + sizeof(struct tdb_header));
+ ok1(tdb->map_size < sizeof(struct tdb_header)
+ + (1 << INITIAL_ZONE_BITS));
ok1(tdb_check(tdb, NULL, NULL) == 0);
- /* Second expand, add another zone of same size. */
- ok1(tdb_expand(tdb, 1) == 0);
- ok1(tdb->map_size == 3 * val + 1 + sizeof(struct tdb_header));
+ /* Fill zone. */
+ val = (1<<INITIAL_ZONE_BITS)
+ - sizeof(struct tdb_used_record)
+ - (tdb->map_size - sizeof(struct tdb_header));
+ ok1(tdb_expand(tdb, val) == 0);
+ ok1(tdb->map_size == sizeof(struct tdb_header)
+ + (1 << INITIAL_ZONE_BITS));
ok1(tdb_check(tdb, NULL, NULL) == 0);
- /* Large expand, but can only add 4th zone of same size. */
- ok1(tdb_expand(tdb, 4*val) == 0);
- ok1(tdb->map_size == 4 * val + 1 + sizeof(struct tdb_header));
+ /* Second expand, adds another zone of same size. */
+ ok1(tdb_expand(tdb, 4 << INITIAL_ZONE_BITS) == 0);
+ ok1(tdb->map_size ==
+ (2<<INITIAL_ZONE_BITS) + sizeof(struct tdb_header));
ok1(tdb_check(tdb, NULL, NULL) == 0);
/* Large expand now will double file. */
- ok1(tdb_expand(tdb, 4*val) == 0);
- ok1(tdb->map_size == 8 * val + 1 + sizeof(struct tdb_header));
+ ok1(tdb_expand(tdb, 4 << INITIAL_ZONE_BITS) == 0);
+ ok1(tdb->map_size ==
+ (4<<INITIAL_ZONE_BITS) + sizeof(struct tdb_header));
ok1(tdb_check(tdb, NULL, NULL) == 0);
/* And again? */
- ok1(tdb_expand(tdb, 4*val) == 0);
- ok1(tdb->map_size == 16 * val + 1 + sizeof(struct tdb_header));
+ ok1(tdb_expand(tdb, 4 << INITIAL_ZONE_BITS) == 0);
+ ok1(tdb->map_size ==
+ (8<<INITIAL_ZONE_BITS) + sizeof(struct tdb_header));
ok1(tdb_check(tdb, NULL, NULL) == 0);
- /* Below comfort level, will add a single 8*val zone. */
- ok1(tdb_expand(tdb, ((8*val) >> TDB_COMFORT_FACTOR_BITS)
+ /* Below comfort level, won't fill zone. */
+ ok1(tdb_expand(tdb,
+ ((3 << INITIAL_ZONE_BITS)
+ >> TDB_COMFORT_FACTOR_BITS)
- sizeof(struct tdb_used_record)) == 0);
- ok1(tdb->map_size == 24 * val + 1 + sizeof(struct tdb_header));
+ ok1(tdb->map_size < (12<<INITIAL_ZONE_BITS)
+ + sizeof(struct tdb_header));
tdb_close(tdb);
}
/* No coalescing can be done due to EOF */
layout = new_tdb_layout(NULL);
tdb_layout_add_zone(layout, zone_bits, false);
+ len = 1024;
+ tdb_layout_add_free(layout, len);
tdb = tdb_layout_get(layout);
- len = layout->elem[1].free.len;
zone_off = layout->elem[0].base.off;
ok1(tdb_check(tdb, NULL, NULL) == 0);
ok1(free_record_length(tdb, layout->elem[1].base.off) == len);
layout = new_tdb_layout(NULL);
tdb_layout_add_zone(layout, zone_bits, false);
tdb_layout_add_free(layout, 1024);
+ tdb_layout_add_free(layout, 2048);
tdb = tdb_layout_get(layout);
zone_off = layout->elem[0].base.off;
- len = layout->elem[2].free.len;
ok1(free_record_length(tdb, layout->elem[1].base.off) == 1024);
- ok1(free_record_length(tdb, layout->elem[2].base.off) == len);
+ ok1(free_record_length(tdb, layout->elem[2].base.off) == 2048);
ok1(tdb_check(tdb, NULL, NULL) == 0);
/* Figure out which bucket (first) free entry is. */
b_off, 1024) == 1);
ok1(!tdb_has_locks(tdb));
ok1(free_record_length(tdb, layout->elem[1].base.off)
- == 1024 + sizeof(struct tdb_used_record) + len);
+ == 1024 + sizeof(struct tdb_used_record) + 2048);
ok1(tdb_check(tdb, NULL, NULL) == 0);
tdb_close(tdb);
tdb_layout_add_zone(layout, zone_bits, false);
tdb_layout_add_free(layout, 1024);
tdb_layout_add_free(layout, 512);
+ tdb_layout_add_free(layout, 256);
tdb = tdb_layout_get(layout);
zone_off = layout->elem[0].base.off;
- len = layout->elem[3].free.len;
ok1(free_record_length(tdb, layout->elem[1].base.off) == 1024);
ok1(free_record_length(tdb, layout->elem[2].base.off) == 512);
- ok1(free_record_length(tdb, layout->elem[3].base.off) == len);
+ ok1(free_record_length(tdb, layout->elem[3].base.off) == 256);
ok1(tdb_check(tdb, NULL, NULL) == 0);
/* Figure out which bucket free entry is. */
ok1(!tdb_has_locks(tdb));
ok1(free_record_length(tdb, layout->elem[1].base.off)
== 1024 + sizeof(struct tdb_used_record) + 512
- + sizeof(struct tdb_used_record) + len);
+ + sizeof(struct tdb_used_record) + 256);
ok1(tdb_check(tdb, NULL, NULL) == 0);
tdb_close(tdb);
d.dptr = malloc(d.dsize);
ok1(tdb_store(tdb, k, d, TDB_INSERT) == 0);
ok1(tdb->map_size == sizeof(struct tdb_header)
- + (1 << INITIAL_ZONE_BITS)+1);
+ + (1 << INITIAL_ZONE_BITS));
/* Insert minimal-length records until we add a zone. */
for (j = 0;
tdb->map_size == sizeof(struct tdb_header)
- + (1 << INITIAL_ZONE_BITS)+1;
+ + (1 << INITIAL_ZONE_BITS);
j++) {
if (tdb_store(tdb, k, k, TDB_INSERT) != 0)
err(1, "Failed to store record %i", j);