Rather than overloading TDB_USED_MAGIC and the hash value as we do now.
We also rename "free list" to the more-accurate "free table" everywhere.
if (tdb_read_convert(tdb, off, &rec, sizeof(rec)) == -1)
return false;
+ if (rec_magic(&rec) != TDB_CHAIN_MAGIC) {
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ "tdb_check: Bad hash chain magic %llu",
+ (long long)rec_magic(&rec));
+ return false;
+ }
+
if (rec_data_length(&rec) != sizeof(struct tdb_chain)) {
tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
"tdb_check: Bad hash chain length %llu vs %zu",
(long long)rec_data_length(&rec),
sizeof(struct tdb_chain));
return false;
}
- if (rec_hash(&rec) != 2) {
+ if (rec_hash(&rec) != 0) {
tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
"tdb_check: Bad hash chain hash value %llu",
(long long)rec_hash(&rec));
if (tdb_read_convert(tdb, off, &rec, sizeof(rec)) == -1)
return false;
+ if (rec_magic(&rec) != TDB_HTABLE_MAGIC) {
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ "tdb_check: Bad hash table magic %llu",
+ (long long)rec_magic(&rec));
+ return false;
+ }
if (rec_data_length(&rec)
!= sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS) {
tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
static bool check_hash(struct tdb_context *tdb,
tdb_off_t used[],
- size_t num_used, size_t num_flists,
+ size_t num_used, size_t num_ftables,
int (*check)(TDB_DATA, TDB_DATA, void *),
void *private_data)
{
- /* Free lists also show up as used. */
- size_t num_found = num_flists;
+ /* Free tables also show up as used. */
+ size_t num_found = num_ftables;
if (!check_hash_tree(tdb, offsetof(struct tdb_header, hashtable),
TDB_TOPLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS,
static bool check_free(struct tdb_context *tdb,
tdb_off_t off,
const struct tdb_free_record *frec,
- tdb_off_t prev, unsigned int flist, unsigned int bucket)
+ tdb_off_t prev, unsigned int ftable,
+ unsigned int bucket)
{
if (frec_magic(frec) != TDB_FREE_MAGIC) {
tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
(long long)off, (long long)frec->magic_and_prev);
return false;
}
- if (frec_flist(frec) != flist) {
+ if (frec_ftable(frec) != ftable) {
tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
- "tdb_check: offset %llu bad freelist %u",
- (long long)off, frec_flist(frec));
+ "tdb_check: offset %llu bad freetable %u",
+ (long long)off, frec_ftable(frec));
return false;
}
return true;
}
-static bool check_free_list(struct tdb_context *tdb,
- tdb_off_t flist_off,
- unsigned flist_num,
- tdb_off_t free[],
- size_t num_free,
- size_t *num_found)
+static bool check_free_table(struct tdb_context *tdb,
+ tdb_off_t ftable_off,
+ unsigned ftable_num,
+ tdb_off_t free[],
+ size_t num_free,
+ size_t *num_found)
{
- struct tdb_freelist flist;
+ struct tdb_freetable ft;
tdb_off_t h;
unsigned int i;
- if (tdb_read_convert(tdb, flist_off, &flist, sizeof(flist)) == -1)
+ if (tdb_read_convert(tdb, ftable_off, &ft, sizeof(ft)) == -1)
return false;
- if (rec_magic(&flist.hdr) != TDB_MAGIC
- || rec_key_length(&flist.hdr) != 0
- || rec_data_length(&flist.hdr) != sizeof(flist) - sizeof(flist.hdr)
- || rec_hash(&flist.hdr) != 1) {
+ if (rec_magic(&ft.hdr) != TDB_FTABLE_MAGIC
+ || rec_key_length(&ft.hdr) != 0
+ || rec_data_length(&ft.hdr) != sizeof(ft) - sizeof(ft.hdr)
+ || rec_hash(&ft.hdr) != 0) {
tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
- "tdb_check: Invalid header on free list");
+ "tdb_check: Invalid header on free table");
return false;
}
tdb_off_t off, prev = 0, *p;
struct tdb_free_record f;
- h = bucket_off(flist_off, i);
+ h = bucket_off(ftable_off, i);
for (off = tdb_read_off(tdb, h); off; off = f.next) {
if (off == TDB_OFF_ERR)
return false;
if (tdb_read_convert(tdb, off, &f, sizeof(f)))
return false;
- if (!check_free(tdb, off, &f, prev, flist_num, i))
+ if (!check_free(tdb, off, &f, prev, ftable_num, i))
return false;
/* FIXME: Check hash bits */
return false;
}
- /* This record should be in free lists. */
+ /* This record should be in free tables. */
- if (frec_flist(&rec.f) != TDB_FLIST_NONE
+ if (frec_ftable(&rec.f) != TDB_FTABLE_NONE
&& !append(free, num_free, off))
return false;
- } else {
+ } else if (rec_magic(&rec.u) == TDB_USED_MAGIC
+ || rec_magic(&rec.u) == TDB_CHAIN_MAGIC
+ || rec_magic(&rec.u) == TDB_HTABLE_MAGIC
+ || rec_magic(&rec.u) == TDB_FTABLE_MAGIC) {
uint64_t klen, dlen, extra;
/* This record is used! */
- if (rec_magic(&rec.u) != TDB_MAGIC) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT,
- TDB_DEBUG_ERROR,
- "tdb_check: Bad magic 0x%llx"
- " at offset %zu",
- (long long)rec_magic(&rec.u),
- (size_t)off);
- return false;
- }
-
if (!append(used, num_used, off))
return false;
(long long)len, (long long)off);
return false;
}
+ } else {
+ tdb_logerr(tdb, TDB_ERR_CORRUPT,
+ TDB_DEBUG_ERROR,
+ "tdb_check: Bad magic 0x%llx at offset %zu",
+ (long long)rec_magic(&rec.u), (size_t)off);
+ return false;
}
}
int (*check)(TDB_DATA key, TDB_DATA data, void *private_data),
void *private_data)
{
- tdb_off_t *free = NULL, *used = NULL, flist, recovery;
- size_t num_free = 0, num_used = 0, num_found = 0, num_flists = 0;
+ tdb_off_t *free = NULL, *used = NULL, ft, recovery;
+ size_t num_free = 0, num_used = 0, num_found = 0, num_ftables = 0;
if (tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false) != 0)
return -1;
if (!check_linear(tdb, &used, &num_used, &free, &num_free, recovery))
goto fail;
- for (flist = first_flist(tdb); flist; flist = next_flist(tdb, flist)) {
- if (flist == TDB_OFF_ERR)
+ for (ft = first_ftable(tdb); ft; ft = next_ftable(tdb, ft)) {
+ if (ft == TDB_OFF_ERR)
goto fail;
- if (!check_free_list(tdb, flist, num_flists, free, num_free,
- &num_found))
+ if (!check_free_table(tdb, ft, num_ftables, free, num_free,
+ &num_found))
goto fail;
- num_flists++;
+ num_ftables++;
}
/* FIXME: Check key uniqueness? */
- if (!check_hash(tdb, used, num_used, num_flists, check, private_data))
+ if (!check_hash(tdb, used, num_used, num_ftables, check, private_data))
goto fail;
if (num_found != num_free) {
return bucket;
}
-tdb_off_t first_flist(struct tdb_context *tdb)
+tdb_off_t first_ftable(struct tdb_context *tdb)
{
- return tdb_read_off(tdb, offsetof(struct tdb_header, free_list));
+ return tdb_read_off(tdb, offsetof(struct tdb_header, free_table));
}
-tdb_off_t next_flist(struct tdb_context *tdb, tdb_off_t flist)
+tdb_off_t next_ftable(struct tdb_context *tdb, tdb_off_t ftable)
{
- return tdb_read_off(tdb, flist + offsetof(struct tdb_freelist, next));
+ return tdb_read_off(tdb, ftable + offsetof(struct tdb_freetable,next));
}
-int tdb_flist_init(struct tdb_context *tdb)
+int tdb_ftable_init(struct tdb_context *tdb)
{
- /* Use reservoir sampling algorithm to select a free list at random. */
+ /* Use reservoir sampling algorithm to select a free table at random. */
unsigned int rnd, max = 0, count = 0;
tdb_off_t off;
- tdb->flist_off = off = first_flist(tdb);
- tdb->flist = 0;
+ tdb->ftable_off = off = first_ftable(tdb);
+ tdb->ftable = 0;
while (off) {
if (off == TDB_OFF_ERR)
rnd = random();
if (rnd >= max) {
- tdb->flist_off = off;
- tdb->flist = count;
+ tdb->ftable_off = off;
+ tdb->ftable = count;
max = rnd;
}
- off = next_flist(tdb, off);
+ off = next_ftable(tdb, off);
count++;
}
return 0;
}
/* Offset of a given bucket. */
-tdb_off_t bucket_off(tdb_off_t flist_off, unsigned bucket)
+tdb_off_t bucket_off(tdb_off_t ftable_off, unsigned bucket)
{
- return flist_off + offsetof(struct tdb_freelist, buckets)
+ return ftable_off + offsetof(struct tdb_freetable, buckets)
+ bucket * sizeof(tdb_off_t);
}
/* Returns free_buckets + 1, or list number to search. */
static tdb_off_t find_free_head(struct tdb_context *tdb,
- tdb_off_t flist_off,
+ tdb_off_t ftable_off,
tdb_off_t bucket)
{
/* Speculatively search for a non-zero bucket. */
- return tdb_find_nonzero_off(tdb, bucket_off(flist_off, 0),
+ return tdb_find_nonzero_off(tdb, bucket_off(ftable_off, 0),
bucket, TDB_FREE_BUCKETS);
}
struct tdb_free_record new;
uint64_t magic = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL));
- /* We only need to set flist_and_len; rest is set in enqueue_in_free */
- new.flist_and_len = ((uint64_t)tdb->flist << (64 - TDB_OFF_UPPER_STEAL))
+ /* We only need to set ftable_and_len; rest is set in enqueue_in_free */
+ new.ftable_and_len = ((uint64_t)tdb->ftable << (64 - TDB_OFF_UPPER_STEAL))
| len;
/* prev = 0. */
new.magic_and_prev = magic;
len = len_with_header - sizeof(struct tdb_used_record);
- b_off = bucket_off(tdb->flist_off, size_to_bucket(len));
+ b_off = bucket_off(tdb->ftable_off, size_to_bucket(len));
if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) != 0)
return -1;
return leftover;
}
-static tdb_off_t flist_offset(struct tdb_context *tdb, unsigned int flist)
+static tdb_off_t ftable_offset(struct tdb_context *tdb, unsigned int ftable)
{
tdb_off_t off;
unsigned int i;
- if (likely(tdb->flist == flist))
- return tdb->flist_off;
+ if (likely(tdb->ftable == ftable))
+ return tdb->ftable_off;
- off = first_flist(tdb);
- for (i = 0; i < flist; i++)
- off = next_flist(tdb, off);
+ off = first_ftable(tdb);
+ for (i = 0; i < ftable; i++)
+ off = next_ftable(tdb, off);
return off;
}
while (end < tdb->map_size) {
const struct tdb_free_record *r;
tdb_off_t nb_off;
- unsigned flist, bucket;
+ unsigned ftable, bucket;
r = tdb_access_read(tdb, end, sizeof(*r), true);
if (!r)
goto err;
if (frec_magic(r) != TDB_FREE_MAGIC
- || frec_flist(r) == TDB_FLIST_NONE) {
+ || frec_ftable(r) == TDB_FTABLE_NONE) {
tdb_access_release(tdb, r);
break;
}
- flist = frec_flist(r);
+ ftable = frec_ftable(r);
bucket = size_to_bucket(frec_len(r));
- nb_off = bucket_off(flist_offset(tdb, flist), bucket);
+ nb_off = bucket_off(ftable_offset(tdb, ftable), bucket);
tdb_access_release(tdb, r);
/* We may be violating lock order here, so best effort. */
break;
}
- if (unlikely(frec_flist(&rec) != flist)
+ if (unlikely(frec_ftable(&rec) != ftable)
|| unlikely(size_to_bucket(frec_len(&rec)) != bucket)) {
add_stat(tdb, alloc_coalesce_race, 1);
tdb_unlock_free_bucket(tdb, nb_off);
/* We have to drop this to avoid deadlocks, so make sure record
* doesn't get coalesced by someone else! */
- rec.flist_and_len = (TDB_FLIST_NONE << (64 - TDB_OFF_UPPER_STEAL))
+ rec.ftable_and_len = (TDB_FTABLE_NONE << (64 - TDB_OFF_UPPER_STEAL))
| (end - off - sizeof(struct tdb_used_record));
if (tdb_write_off(tdb, off + offsetof(struct tdb_free_record,
- flist_and_len),
- rec.flist_and_len) != 0)
+ ftable_and_len),
+ rec.ftable_and_len) != 0)
goto err;
add_stat(tdb, alloc_coalesce_succeeded, 1);
/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
- tdb_off_t flist_off,
+ tdb_off_t ftable_off,
tdb_off_t bucket,
size_t keylen, size_t datalen,
bool want_extra,
+ unsigned magic,
unsigned hashlow)
{
tdb_off_t off, b_off,best_off;
add_stat(tdb, allocs, 1);
again:
- b_off = bucket_off(flist_off, bucket);
+ b_off = bucket_off(ftable_off, bucket);
/* FIXME: Try non-blocking wait first, to measure contention. */
/* Lock this bucket. */
return TDB_OFF_ERR;
}
- best.flist_and_len = -1ULL;
+ best.ftable_and_len = -1ULL;
best_off = 0;
/* Get slack if we're after extra. */
assert(keylen + datalen + leftover <= frec_len(&best));
/* We need to mark non-free before we drop lock, otherwise
* coalesce() could try to merge it! */
- if (set_used_header(tdb, &rec, keylen, datalen,
- frec_len(&best) - leftover,
- hashlow) != 0)
+ if (set_header(tdb, &rec, magic, keylen, datalen,
+ frec_len(&best) - leftover, hashlow) != 0)
goto unlock_err;
if (tdb_write_convert(tdb, best_off, &rec, sizeof(rec)) != 0)
- /* Get a free block from current free list, or 0 if none. */
+ /* Get a free block from current free table, or 0 if none. */
static tdb_off_t get_free(struct tdb_context *tdb,
size_t keylen, size_t datalen, bool want_extra,
- unsigned hashlow)
+ unsigned magic, unsigned hashlow)
{
- tdb_off_t off, flist_off;
- unsigned start_b, b, flist;
+ tdb_off_t off, ftable_off;
+ unsigned start_b, b, ftable;
bool wrapped = false;
/* If they are growing, add 50% to get to higher bucket. */
else
start_b = size_to_bucket(adjust_size(keylen, datalen));
- flist_off = tdb->flist_off;
- flist = tdb->flist;
- while (!wrapped || flist_off != tdb->flist_off) {
+ ftable_off = tdb->ftable_off;
+ ftable = tdb->ftable;
+ while (!wrapped || ftable_off != tdb->ftable_off) {
/* Start at exact size bucket, and search up... */
- for (b = find_free_head(tdb, flist_off, start_b);
+ for (b = find_free_head(tdb, ftable_off, start_b);
b < TDB_FREE_BUCKETS;
- b = find_free_head(tdb, flist_off, b + 1)) {
+ b = find_free_head(tdb, ftable_off, b + 1)) {
/* Try getting one from list. */
- off = lock_and_alloc(tdb, flist_off,
+ off = lock_and_alloc(tdb, ftable_off,
b, keylen, datalen, want_extra,
- hashlow);
+ magic, hashlow);
if (off == TDB_OFF_ERR)
return TDB_OFF_ERR;
if (off != 0) {
if (b == TDB_FREE_BUCKETS - 1)
add_stat(tdb, alloc_bucket_max, 1);
/* Worked? Stay using this list. */
- tdb->flist_off = flist_off;
- tdb->flist = flist;
+ tdb->ftable_off = ftable_off;
+ tdb->ftable = ftable;
return off;
}
/* Didn't work. Try next bucket. */
}
- /* Hmm, try next list. */
- flist_off = next_flist(tdb, flist_off);
- flist++;
+ /* Hmm, try next table. */
+ ftable_off = next_ftable(tdb, ftable_off);
+ ftable++;
- if (flist_off == 0) {
+ if (ftable_off == 0) {
wrapped = true;
- flist_off = first_flist(tdb);
- flist = 0;
+ ftable_off = first_ftable(tdb);
+ ftable = 0;
}
}
return 0;
}
-int set_used_header(struct tdb_context *tdb,
- struct tdb_used_record *rec,
- uint64_t keylen, uint64_t datalen,
- uint64_t actuallen, unsigned hashlow)
+int set_header(struct tdb_context *tdb,
+ struct tdb_used_record *rec,
+ unsigned magic, uint64_t keylen, uint64_t datalen,
+ uint64_t actuallen, unsigned hashlow)
{
uint64_t keybits = (fls64(keylen) + 1) / 2;
rec->magic_and_meta = (hashlow & ((1 << 11)-1))
| ((actuallen - (keylen + datalen)) << 11)
| (keybits << 43)
- | (TDB_MAGIC << 48);
+ | ((uint64_t)magic << 48);
rec->key_and_data_len = (keylen | (datalen << (keybits*2)));
/* Encoding can fail on big values. */
/* This won't fail: it will expand the database if it has to. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
- uint64_t hash, bool growing)
+ uint64_t hash, unsigned magic, bool growing)
{
tdb_off_t off;
assert(!tdb->direct_access);
for (;;) {
- off = get_free(tdb, keylen, datalen, growing, hash);
+ off = get_free(tdb, keylen, datalen, growing, magic, hash);
if (likely(off != 0))
break;
return -1;
if (!next) {
- next = alloc(tdb, 0, sizeof(struct tdb_chain), 2,
- false);
+ next = alloc(tdb, 0, sizeof(struct tdb_chain), 0,
+ TDB_CHAIN_MAGIC, false);
if (next == TDB_OFF_ERR)
return -1;
if (zero_out(tdb, next+sizeof(struct tdb_used_record),
static int expand_group(struct tdb_context *tdb, struct hash_info *h)
{
- unsigned bucket, num_vals, i, hash;
+ unsigned bucket, num_vals, i, magic;
size_t subsize;
tdb_off_t subhash;
tdb_off_t vals[1 << TDB_HASH_GROUP_BITS];
if (h->hash_used == 64) {
add_stat(tdb, alloc_chain, 1);
subsize = sizeof(struct tdb_chain);
- hash = 2;
+ magic = TDB_CHAIN_MAGIC;
} else {
add_stat(tdb, alloc_subhash, 1);
subsize = (sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS);
- hash = 0;
+ magic = TDB_HTABLE_MAGIC;
}
- subhash = alloc(tdb, 0, subsize, hash, false);
+ subhash = alloc(tdb, 0, subsize, 0, magic, false);
if (subhash == TDB_OFF_ERR)
return -1;
ltype);
return -1;
}
- if (rec_magic(&rec) != TDB_MAGIC) {
+ if (rec_magic(&rec) != TDB_USED_MAGIC) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
TDB_DEBUG_FATAL,
"next_in_hash:"
return -1;
}
- /* Lock free lists: there to end of file. */
+ /* Lock free tables: there to end of file. */
if (tdb_brlock(tdb, ltype, TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE,
0, flags)) {
if (!(flags & TDB_LOCK_PROBE)) {
tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_ERROR,
- "tdb_allrecord_lock freelist failed");
+ "tdb_allrecord_lock freetables failed");
}
tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START,
TDB_HASH_LOCK_RANGE);
#define TDB_MAGIC_FOOD "TDB file\n"
#define TDB_VERSION ((uint64_t)(0x26011967 + 7))
-#define TDB_MAGIC ((uint64_t)0x1999)
+#define TDB_USED_MAGIC ((uint64_t)0x1999)
+#define TDB_HTABLE_MAGIC ((uint64_t)0x1888)
+#define TDB_CHAIN_MAGIC ((uint64_t)0x1777)
+#define TDB_FTABLE_MAGIC ((uint64_t)0x1666)
#define TDB_FREE_MAGIC ((uint64_t)0xFE)
#define TDB_HASH_MAGIC (0xA1ABE11A01092008ULL)
#define TDB_RECOVERY_MAGIC (0xf53bc0e7ad124589ULL)
(sizeof(struct tdb_free_record) - sizeof(struct tdb_used_record))
- /* Indicates this entry is not on an flist (can happen during coalescing) */
+ /* Indicates this entry is not on a free table (can happen during coalescing) */
-#define TDB_FLIST_NONE ((1ULL << TDB_OFF_UPPER_STEAL) - 1)
+#define TDB_FTABLE_NONE ((1ULL << TDB_OFF_UPPER_STEAL) - 1)
#if !HAVE_BSWAP_64
static inline uint64_t bswap_64(uint64_t x)
struct tdb_free_record {
uint64_t magic_and_prev; /* TDB_OFF_UPPER_STEAL bits magic, then prev */
- uint64_t flist_and_len; /* Len not counting these two fields. */
+ uint64_t ftable_and_len; /* Len not counting these two fields. */
/* This is why the minimum record size is 8 bytes. */
uint64_t next;
};
static inline uint64_t frec_len(const struct tdb_free_record *f)
{
- return f->flist_and_len & ((1ULL << (64 - TDB_OFF_UPPER_STEAL))-1);
+ return f->ftable_and_len & ((1ULL << (64 - TDB_OFF_UPPER_STEAL))-1);
}
-static inline unsigned frec_flist(const struct tdb_free_record *f)
+static inline unsigned frec_ftable(const struct tdb_free_record *f)
{
- return f->flist_and_len >> (64 - TDB_OFF_UPPER_STEAL);
+ return f->ftable_and_len >> (64 - TDB_OFF_UPPER_STEAL);
}
struct tdb_recovery_record {
uint64_t version; /* version of the code */
uint64_t hash_test; /* result of hashing HASH_MAGIC. */
uint64_t hash_seed; /* "random" seed written at creation time. */
- tdb_off_t free_list; /* (First) free list. */
+ tdb_off_t free_table; /* (First) free table. */
tdb_off_t recovery; /* Transaction recovery area. */
tdb_off_t reserved[26];
tdb_off_t hashtable[1ULL << TDB_TOPLEVEL_HASH_BITS];
};
-struct tdb_freelist {
+struct tdb_freetable {
struct tdb_used_record hdr;
tdb_off_t next;
tdb_off_t buckets[TDB_FREE_BUCKETS];
/* Set if we are in a transaction. */
struct tdb_transaction *transaction;
- /* What freelist are we using? */
- uint64_t flist_off;
- unsigned int flist;
+ /* What free table are we using? */
+ tdb_off_t ftable_off;
+ unsigned int ftable;
/* IO methods: changes for transactions. */
const struct tdb_methods *methods;
bool is_subhash(tdb_off_t val);
/* free.c: */
-int tdb_flist_init(struct tdb_context *tdb);
+int tdb_ftable_init(struct tdb_context *tdb);
- /* check.c needs these to iterate through free lists. */
+ /* check.c needs these to iterate through free tables. */
-tdb_off_t first_flist(struct tdb_context *tdb);
-tdb_off_t next_flist(struct tdb_context *tdb, tdb_off_t flist);
+tdb_off_t first_ftable(struct tdb_context *tdb);
+tdb_off_t next_ftable(struct tdb_context *tdb, tdb_off_t ftable);
-/* If this fails, try tdb_expand. */
+/* This returns space or TDB_OFF_ERR. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
- uint64_t hash, bool growing);
+ uint64_t hash, unsigned magic, bool growing);
- /* Put this record in a free list. */
+ /* Put this record in a free table. */
int add_free_record(struct tdb_context *tdb,
tdb_off_t off, tdb_len_t len_with_header);
-/* Set up header for a used record. */
-int set_used_header(struct tdb_context *tdb,
- struct tdb_used_record *rec,
- uint64_t keylen, uint64_t datalen,
- uint64_t actuallen, unsigned hashlow);
+/* Set up header for a used/ftable/htable/chain record. */
+int set_header(struct tdb_context *tdb,
+ struct tdb_used_record *rec,
+ unsigned magic, uint64_t keylen, uint64_t datalen,
+ uint64_t actuallen, unsigned hashlow);
/* Used by tdb_check to verify. */
unsigned int size_to_bucket(tdb_len_t data_len);
-tdb_off_t bucket_off(tdb_off_t flist_off, unsigned bucket);
+tdb_off_t bucket_off(tdb_off_t ftable_off, unsigned bucket);
/* Used by tdb_summary */
size_t dead_space(struct tdb_context *tdb, tdb_off_t off);
static bool summarize(struct tdb_context *tdb,
struct tally *hashes,
- struct tally *flists,
+ struct tally *ftables,
struct tally *free,
struct tally *keys,
struct tally *data,
tally_add(buckets, size_to_bucket(len));
len += sizeof(p->u);
unc++;
- } else if (rec_magic(&p->u) == TDB_MAGIC) {
+ } else if (rec_magic(&p->u) == TDB_USED_MAGIC) {
if (unc) {
tally_add(uncoal, unc);
unc = 0;
+ rec_data_length(&p->u)
+ rec_extra_padding(&p->u);
- /* FIXME: Use different magic for hashes, flists. */
- if (!rec_key_length(&p->u) && rec_hash(&p->u) < 3) {
- if (rec_hash(&p->u) == 0) {
- int count = count_hash(tdb,
- off + sizeof(p->u),
- TDB_SUBLEVEL_HASH_BITS);
- if (count == -1)
- return false;
- tally_add(hashes, count);
- } else if (rec_hash(&p->u) == 1) {
- tally_add(flists,
- rec_data_length(&p->u));
- } else if (rec_hash(&p->u) == 2) {
- tally_add(chains, 1);
- }
- } else {
- tally_add(keys, rec_key_length(&p->u));
- tally_add(data, rec_data_length(&p->u));
- }
+ tally_add(keys, rec_key_length(&p->u));
+ tally_add(data, rec_data_length(&p->u));
+ tally_add(extra, rec_extra_padding(&p->u));
+ } else if (rec_magic(&p->u) == TDB_HTABLE_MAGIC) {
+ int count = count_hash(tdb,
+ off + sizeof(p->u),
+ TDB_SUBLEVEL_HASH_BITS);
+ if (count == -1)
+ return false;
+ tally_add(hashes, count);
+ tally_add(extra, rec_extra_padding(&p->u));
+ len = sizeof(p->u)
+ + rec_data_length(&p->u)
+ + rec_extra_padding(&p->u);
+ } else if (rec_magic(&p->u) == TDB_FTABLE_MAGIC) {
+ len = sizeof(p->u)
+ + rec_data_length(&p->u)
+ + rec_extra_padding(&p->u);
+ tally_add(ftables, rec_data_length(&p->u));
+ tally_add(extra, rec_extra_padding(&p->u));
+ } else if (rec_magic(&p->u) == TDB_CHAIN_MAGIC) {
+ len = sizeof(p->u)
+ + rec_data_length(&p->u)
+ + rec_extra_padding(&p->u);
+ tally_add(chains, 1);
tally_add(extra, rec_extra_padding(&p->u));
} else
len = dead_space(tdb, off);
char *tdb_summary(struct tdb_context *tdb, enum tdb_summary_flags flags)
{
tdb_len_t len;
- struct tally *flists, *hashes, *freet, *keys, *data, *extra, *uncoal,
+ struct tally *ftables, *hashes, *freet, *keys, *data, *extra, *uncoal,
*buckets, *chains;
char *hashesg, *freeg, *keysg, *datag, *extrag, *uncoalg, *bucketsg;
char *ret = NULL;
}
/* Start stats off empty. */
- flists = tally_new(HISTO_HEIGHT);
+ ftables = tally_new(HISTO_HEIGHT);
hashes = tally_new(HISTO_HEIGHT);
freet = tally_new(HISTO_HEIGHT);
keys = tally_new(HISTO_HEIGHT);
uncoal = tally_new(HISTO_HEIGHT);
buckets = tally_new(HISTO_HEIGHT);
chains = tally_new(HISTO_HEIGHT);
- if (!flists || !hashes || !freet || !keys || !data || !extra
+ if (!ftables || !hashes || !freet || !keys || !data || !extra
|| !uncoal || !buckets || !chains) {
tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_ERROR,
"tdb_summary: failed to allocate tally structures");
goto unlock;
}
- if (!summarize(tdb, hashes, flists, freet, keys, data, extra, uncoal,
+ if (!summarize(tdb, hashes, ftables, freet, keys, data, extra, uncoal,
buckets, chains))
goto unlock;
tally_total(freet, NULL) * 100.0 / tdb->map_size,
(tally_num(keys) + tally_num(freet) + tally_num(hashes))
* sizeof(struct tdb_used_record) * 100.0 / tdb->map_size,
- tally_num(flists) * sizeof(struct tdb_freelist)
+ tally_num(ftables) * sizeof(struct tdb_freetable)
* 100.0 / tdb->map_size,
(tally_num(hashes)
* (sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS)
free(data);
free(extra);
free(uncoal);
+ free(ftables);
free(chains);
tdb_allrecord_unlock(tdb, F_RDLCK);
struct new_database {
struct tdb_header hdr;
- struct tdb_freelist flist;
+ struct tdb_freetable ftable;
};
/* initialise a new database */
memset(newdb.hdr.hashtable, 0, sizeof(newdb.hdr.hashtable));
/* Free is empty. */
- newdb.hdr.free_list = offsetof(struct new_database, flist);
- memset(&newdb.flist, 0, sizeof(newdb.flist));
- set_used_header(NULL, &newdb.flist.hdr, 0,
- sizeof(newdb.flist) - sizeof(newdb.flist.hdr),
- sizeof(newdb.flist) - sizeof(newdb.flist.hdr), 1);
+ newdb.hdr.free_table = offsetof(struct new_database, ftable);
+ memset(&newdb.ftable, 0, sizeof(newdb.ftable));
+ set_header(NULL, &newdb.ftable.hdr, TDB_FTABLE_MAGIC, 0,
+ sizeof(newdb.ftable) - sizeof(newdb.ftable.hdr),
+ sizeof(newdb.ftable) - sizeof(newdb.ftable.hdr), 0);
/* Magic food */
memset(newdb.hdr.magic_food, 0, sizeof(newdb.hdr.magic_food));
}
tdb_convert(tdb, &hdr.hash_seed, sizeof(hdr.hash_seed));
tdb->hash_seed = hdr.hash_seed;
- tdb_flist_init(tdb);
+ tdb_ftable_init(tdb);
return tdb;
}
goto fail;
}
- if (tdb_flist_init(tdb) == -1)
+ if (tdb_ftable_init(tdb) == -1)
goto fail;
tdb->next = tdbs;
{
uint64_t dataroom = rec_data_length(rec) + rec_extra_padding(rec);
- if (set_used_header(tdb, rec, keylen, datalen, keylen + dataroom, h))
+ if (set_header(tdb, rec, TDB_USED_MAGIC, keylen, datalen,
+ keylen + dataroom, h))
return -1;
return tdb_write_convert(tdb, off, rec, sizeof(*rec));
tdb_off_t new_off;
/* Allocate a new record. */
- new_off = alloc(tdb, key.dsize, dbuf.dsize, h->h, growing);
+ new_off = alloc(tdb, key.dsize, dbuf.dsize, h->h, TDB_USED_MAGIC,
+ growing);
if (unlikely(new_off == TDB_OFF_ERR))
return -1;
layout->elem[layout->num_elems++] = elem;
}
-void tdb_layout_add_freelist(struct tdb_layout *layout)
+void tdb_layout_add_freetable(struct tdb_layout *layout)
{
union tdb_layout_elem elem;
- elem.base.type = FREELIST;
+ elem.base.type = FREETABLE;
add(layout, elem);
}
void tdb_layout_add_free(struct tdb_layout *layout, tdb_len_t len,
- unsigned flist)
+ unsigned ftable)
{
union tdb_layout_elem elem;
elem.base.type = FREE;
elem.free.len = len;
- elem.free.flist_num = flist;
+ elem.free.ftable_num = ftable;
add(layout, elem);
}
+ htable->extra;
}
-static tdb_len_t freelist_len(struct tle_freelist *flist)
+static tdb_len_t freetable_len(struct tle_freetable *ftable)
{
- return sizeof(struct tdb_freelist);
+ return sizeof(struct tdb_freetable);
}
static void set_free_record(void *mem, tdb_len_t len)
{
struct tdb_used_record *u = mem;
- set_used_header(tdb, u, used->key.dsize, used->data.dsize,
- used->key.dsize + used->data.dsize + used->extra,
- tdb_hash(tdb, used->key.dptr, used->key.dsize));
+ set_header(tdb, u, TDB_USED_MAGIC, used->key.dsize, used->data.dsize,
+ used->key.dsize + used->data.dsize + used->extra,
+ tdb_hash(tdb, used->key.dptr, used->key.dsize));
memcpy(u + 1, used->key.dptr, used->key.dsize);
memcpy((char *)(u + 1) + used->key.dsize,
used->data.dptr, used->data.dsize);
struct tdb_used_record *u = mem;
tdb_len_t len = sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS;
- set_used_header(tdb, u, 0, len, len + htable->extra, 0);
+ set_header(tdb, u, TDB_HTABLE_MAGIC, 0, len, len + htable->extra, 0);
memset(u + 1, 0, len);
}
-static void set_freelist(void *mem, struct tdb_context *tdb,
- struct tle_freelist *freelist, struct tdb_header *hdr,
- tdb_off_t last_flist)
+static void set_freetable(void *mem, struct tdb_context *tdb,
+ struct tle_freetable *freetable, struct tdb_header *hdr,
+ tdb_off_t last_ftable)
{
- struct tdb_freelist *flist = mem;
- memset(flist, 0, sizeof(*flist));
- set_used_header(tdb, &flist->hdr, 0,
- sizeof(*flist) - sizeof(flist->hdr),
- sizeof(*flist) - sizeof(flist->hdr), 1);
-
- if (last_flist) {
- flist = (struct tdb_freelist *)((char *)hdr + last_flist);
- flist->next = freelist->base.off;
+ struct tdb_freetable *ftable = mem;
+ memset(ftable, 0, sizeof(*ftable));
+ set_header(tdb, &ftable->hdr, TDB_FTABLE_MAGIC, 0,
+ sizeof(*ftable) - sizeof(ftable->hdr),
+ sizeof(*ftable) - sizeof(ftable->hdr), 0);
+
+ if (last_ftable) {
+ ftable = (struct tdb_freetable *)((char *)hdr + last_ftable);
+ ftable->next = freetable->base.off;
} else {
- hdr->free_list = freelist->base.off;
+ hdr->free_table = freetable->base.off;
}
}
static void add_to_freetable(struct tdb_context *tdb,
tdb_off_t eoff,
tdb_off_t elen,
- unsigned flist,
- struct tle_freelist *freelist)
+ unsigned ftable,
+ struct tle_freetable *freetable)
{
- tdb->flist_off = freelist->base.off;
- tdb->flist = flist;
+ tdb->ftable_off = freetable->base.off;
+ tdb->ftable = ftable;
add_free_record(tdb, eoff, sizeof(struct tdb_used_record) + elen);
}
abort();
}
-static struct tle_freelist *find_flist(struct tdb_layout *layout, unsigned num)
+static struct tle_freetable *find_ftable(struct tdb_layout *layout, unsigned num)
{
unsigned i;
for (i = 0; i < layout->num_elems; i++) {
- if (layout->elem[i].base.type != FREELIST)
+ if (layout->elem[i].base.type != FREETABLE)
continue;
if (num == 0)
- return &layout->elem[i].flist;
+ return &layout->elem[i].ftable;
num--;
}
abort();
struct tdb_context *tdb_layout_get(struct tdb_layout *layout)
{
unsigned int i;
- tdb_off_t off, len, last_flist;
+ tdb_off_t off, len, last_ftable;
char *mem;
struct tdb_context *tdb;
union tdb_layout_elem *e = &layout->elem[i];
e->base.off = off;
switch (e->base.type) {
- case FREELIST:
- len = freelist_len(&e->flist);
+ case FREETABLE:
+ len = freetable_len(&e->ftable);
break;
case FREE:
len = free_record_len(e->free.len);
tdb->map_ptr = mem;
tdb->map_size = off;
- last_flist = 0;
+ last_ftable = 0;
for (i = 0; i < layout->num_elems; i++) {
union tdb_layout_elem *e = &layout->elem[i];
switch (e->base.type) {
- case FREELIST:
- set_freelist(mem + e->base.off, tdb, &e->flist,
- (struct tdb_header *)mem, last_flist);
- last_flist = e->base.off;
+ case FREETABLE:
+ set_freetable(mem + e->base.off, tdb, &e->ftable,
+ (struct tdb_header *)mem, last_ftable);
+ last_ftable = e->base.off;
break;
case FREE:
set_free_record(mem + e->base.off, e->free.len);
break;
}
}
- /* Must have a free list! */
- assert(last_flist);
+ /* Must have a free table! */
+ assert(last_ftable);
/* Now fill the free and hash tables. */
for (i = 0; i < layout->num_elems; i++) {
switch (e->base.type) {
case FREE:
add_to_freetable(tdb, e->base.off, e->free.len,
- e->free.flist_num,
- find_flist(layout, e->free.flist_num));
+ e->free.ftable_num,
+ find_ftable(layout, e->free.ftable_num));
break;
case DATA:
add_to_hashtable(tdb, e->base.off, e->used.key);
}
}
- tdb->flist_off = find_flist(layout, 0)->base.off;
+ tdb->ftable_off = find_ftable(layout, 0)->base.off;
/* Get physical if they asked for it. */
if (layout->filename) {
#include <ccan/tdb2/private.h>
struct tdb_layout *new_tdb_layout(const char *filename);
-void tdb_layout_add_freelist(struct tdb_layout *layout);
+void tdb_layout_add_freetable(struct tdb_layout *layout);
void tdb_layout_add_free(struct tdb_layout *layout, tdb_len_t len,
- unsigned flist);
+ unsigned ftable);
void tdb_layout_add_used(struct tdb_layout *layout,
TDB_DATA key, TDB_DATA data,
tdb_len_t extra);
struct tdb_context *tdb_layout_get(struct tdb_layout *layout);
enum layout_type {
- FREELIST, FREE, DATA, HASHTABLE,
+ FREETABLE, FREE, DATA, HASHTABLE,
};
/* Shared by all union members. */
tdb_off_t off;
};
-struct tle_freelist {
+struct tle_freetable {
struct tle_base base;
};
struct tle_free {
struct tle_base base;
tdb_len_t len;
- unsigned flist_num;
+ unsigned ftable_num;
};
struct tle_used {
union tdb_layout_elem {
struct tle_base base;
- struct tle_freelist flist;
+ struct tle_freetable ftable;
struct tle_free free;
struct tle_used used;
struct tle_hashtable hashtable;
/* We should be able to encode any data value. */
for (i = 0; i < 64; i++)
- ok1(set_used_header(&tdb, &rec, 0, 1ULL << i, 1ULL << i, 0)
- == 0);
+ ok1(set_header(&tdb, &rec, TDB_USED_MAGIC, 0, 1ULL << i,
+ 1ULL << i, 0) == 0);
/* And any key and data with < 64 bits between them. */
for (i = 0; i < 32; i++) {
tdb_len_t dlen = 1ULL >> (63 - i), klen = 1ULL << i;
- ok1(set_used_header(&tdb, &rec, klen, dlen, klen + dlen, 0)
- == 0);
+ ok1(set_header(&tdb, &rec, TDB_USED_MAGIC, klen, dlen,
+ klen + dlen, 0) == 0);
}
/* We should neatly encode all values. */
uint64_t klen = 1ULL << (i < 16 ? i : 15);
uint64_t dlen = 1ULL << i;
uint64_t xlen = 1ULL << (i < 32 ? i : 31);
- ok1(set_used_header(&tdb, &rec, klen, dlen, klen+dlen+xlen, h)
- == 0);
+ ok1(set_header(&tdb, &rec, TDB_USED_MAGIC, klen, dlen,
+ klen+dlen+xlen, h) == 0);
ok1(rec_key_length(&rec) == klen);
ok1(rec_data_length(&rec) == dlen);
ok1(rec_extra_padding(&rec) == xlen);
ok1((uint64_t)rec_hash(&rec) == h);
- ok1(rec_magic(&rec) == TDB_MAGIC);
+ ok1(rec_magic(&rec) == TDB_USED_MAGIC);
}
ok1(tap_log_messages == 0);
return exit_status();
/* No coalescing can be done due to EOF */
layout = new_tdb_layout(NULL);
- tdb_layout_add_freelist(layout);
+ tdb_layout_add_freetable(layout);
len = 1024;
tdb_layout_add_free(layout, len, 0);
tdb = tdb_layout_get(layout);
ok1(free_record_length(tdb, layout->elem[1].base.off) == len);
/* Figure out which bucket free entry is. */
- b_off = bucket_off(tdb->flist_off, size_to_bucket(len));
+ b_off = bucket_off(tdb->ftable_off, size_to_bucket(len));
/* Lock and fail to coalesce. */
ok1(tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == 0);
ok1(coalesce(tdb, layout->elem[1].base.off, b_off, len) == 0);
/* No coalescing can be done due to used record */
layout = new_tdb_layout(NULL);
- tdb_layout_add_freelist(layout);
+ tdb_layout_add_freetable(layout);
tdb_layout_add_free(layout, 1024, 0);
tdb_layout_add_used(layout, key, data, 6);
tdb = tdb_layout_get(layout);
ok1(tdb_check(tdb, NULL, NULL) == 0);
/* Figure out which bucket free entry is. */
- b_off = bucket_off(tdb->flist_off, size_to_bucket(1024));
+ b_off = bucket_off(tdb->ftable_off, size_to_bucket(1024));
/* Lock and fail to coalesce. */
ok1(tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == 0);
ok1(coalesce(tdb, layout->elem[1].base.off, b_off, 1024) == 0);
/* Coalescing can be done due to two free records, then EOF */
layout = new_tdb_layout(NULL);
- tdb_layout_add_freelist(layout);
+ tdb_layout_add_freetable(layout);
tdb_layout_add_free(layout, 1024, 0);
tdb_layout_add_free(layout, 2048, 0);
tdb = tdb_layout_get(layout);
ok1(tdb_check(tdb, NULL, NULL) == 0);
/* Figure out which bucket (first) free entry is. */
- b_off = bucket_off(tdb->flist_off, size_to_bucket(1024));
+ b_off = bucket_off(tdb->ftable_off, size_to_bucket(1024));
/* Lock and coalesce. */
ok1(tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == 0);
ok1(coalesce(tdb, layout->elem[1].base.off, b_off, 1024) == 1);
/* Coalescing can be done due to two free records, then data */
layout = new_tdb_layout(NULL);
- tdb_layout_add_freelist(layout);
+ tdb_layout_add_freetable(layout);
tdb_layout_add_free(layout, 1024, 0);
tdb_layout_add_free(layout, 512, 0);
tdb_layout_add_used(layout, key, data, 6);
ok1(tdb_check(tdb, NULL, NULL) == 0);
/* Figure out which bucket free entry is. */
- b_off = bucket_off(tdb->flist_off, size_to_bucket(1024));
+ b_off = bucket_off(tdb->ftable_off, size_to_bucket(1024));
/* Lock and coalesce. */
ok1(tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == 0);
ok1(coalesce(tdb, layout->elem[1].base.off, b_off, 1024) == 1);
/* Coalescing can be done due to three free records, then EOF */
layout = new_tdb_layout(NULL);
- tdb_layout_add_freelist(layout);
+ tdb_layout_add_freetable(layout);
tdb_layout_add_free(layout, 1024, 0);
tdb_layout_add_free(layout, 512, 0);
tdb_layout_add_free(layout, 256, 0);
ok1(tdb_check(tdb, NULL, NULL) == 0);
/* Figure out which bucket free entry is. */
- b_off = bucket_off(tdb->flist_off, size_to_bucket(1024));
+ b_off = bucket_off(tdb->ftable_off, size_to_bucket(1024));
/* Lock and coalesce. */
ok1(tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == 0);
ok1(coalesce(tdb, layout->elem[1].base.off, b_off, 1024) == 1);
/* FIXME: Check lock length */
/* Allocate a new record. */
- new_off = alloc(tdb, key.dsize, dbuf.dsize, h.h, false);
+ new_off = alloc(tdb, key.dsize, dbuf.dsize, h.h,
+ TDB_USED_MAGIC, false);
ok1(new_off != TDB_OFF_ERR);
/* We should be able to add it now. */
/* We should be able to add it now. */
/* Allocate a new record. */
- new_off = alloc(tdb, key.dsize, dbuf.dsize, h.h, false);
+ new_off = alloc(tdb, key.dsize, dbuf.dsize, h.h,
+ TDB_USED_MAGIC, false);
ok1(new_off != TDB_OFF_ERR);
ok1(add_to_hash(tdb, &h, new_off) == 0);
#include <err.h>
#include "logging.h"
-static bool empty_freelist(struct tdb_context *tdb)
+static bool empty_freetable(struct tdb_context *tdb)
{
- struct tdb_freelist free;
+ struct tdb_freetable free;
unsigned int i;
- /* Now, free list should be completely exhausted in zone 0 */
- if (tdb_read_convert(tdb, tdb->flist_off, &free, sizeof(free)) != 0)
+ /* Now, free table should be completely exhausted in zone 0 */
+ if (tdb_read_convert(tdb, tdb->ftable_off, &free, sizeof(free)) != 0)
abort();
for (i = 0; i < sizeof(free.buckets)/sizeof(free.buckets[0]); i++) {
if (!tdb)
continue;
- ok1(empty_freelist(tdb));
+ ok1(empty_freetable(tdb));
/* Need some hash lock for expand. */
ok1(tdb_lock_hashes(tdb, 0, 1, F_WRLCK, TDB_LOCK_WAIT) == 0);
/* Create some free space. */
ok1(tdb_expand(tdb, 1) == 0);
ok1(tdb_unlock_hashes(tdb, 0, 1, F_WRLCK) == 0);
ok1(tdb_check(tdb, NULL, NULL) == 0);
- ok1(!empty_freelist(tdb));
+ ok1(!empty_freetable(tdb));
size = tdb->map_size;
/* Insert minimal-length records until we expand. */
for (j = 0; tdb->map_size == size; j++) {
- was_empty = empty_freelist(tdb);
+ was_empty = empty_freetable(tdb);
if (tdb_store(tdb, k, k, TDB_INSERT) != 0)
err(1, "Failed to store record %i", j);
}
/* Would have been empty before expansion, but no longer. */
ok1(was_empty);
- ok1(!empty_freelist(tdb));
+ ok1(!empty_freetable(tdb));
tdb_close(tdb);
}
data.dsize = 5;
key.dsize = 5;
- /* Create a TDB with three free lists. */
+ /* Create a TDB with three free tables. */
layout = new_tdb_layout(NULL);
- tdb_layout_add_freelist(layout);
- tdb_layout_add_freelist(layout);
- tdb_layout_add_freelist(layout);
+ tdb_layout_add_freetable(layout);
+ tdb_layout_add_freetable(layout);
+ tdb_layout_add_freetable(layout);
tdb_layout_add_free(layout, 80, 0);
/* Used record prevent coalescing. */
tdb_layout_add_used(layout, key, data, 6);
tdb = tdb_layout_get(layout);
ok1(tdb_check(tdb, NULL, NULL) == 0);
- off = get_free(tdb, 0, 80 - sizeof(struct tdb_used_record), 0, 0);
+ off = get_free(tdb, 0, 80 - sizeof(struct tdb_used_record), 0,
+ TDB_USED_MAGIC, 0);
ok1(off == layout->elem[3].base.off);
- ok1(tdb->flist_off == layout->elem[0].base.off);
+ ok1(tdb->ftable_off == layout->elem[0].base.off);
- off = get_free(tdb, 0, 160 - sizeof(struct tdb_used_record), 0, 0);
+ off = get_free(tdb, 0, 160 - sizeof(struct tdb_used_record), 0,
+ TDB_USED_MAGIC, 0);
ok1(off == layout->elem[5].base.off);
- ok1(tdb->flist_off == layout->elem[1].base.off);
+ ok1(tdb->ftable_off == layout->elem[1].base.off);
- off = get_free(tdb, 0, 320 - sizeof(struct tdb_used_record), 0, 0);
+ off = get_free(tdb, 0, 320 - sizeof(struct tdb_used_record), 0,
+ TDB_USED_MAGIC, 0);
ok1(off == layout->elem[7].base.off);
- ok1(tdb->flist_off == layout->elem[2].base.off);
+ ok1(tdb->ftable_off == layout->elem[2].base.off);
- off = get_free(tdb, 0, 40 - sizeof(struct tdb_used_record), 0, 0);
+ off = get_free(tdb, 0, 40 - sizeof(struct tdb_used_record), 0,
+ TDB_USED_MAGIC, 0);
ok1(off == layout->elem[9].base.off);
- ok1(tdb->flist_off == layout->elem[0].base.off);
+ ok1(tdb->ftable_off == layout->elem[0].base.off);
/* Now we fail. */
- off = get_free(tdb, 0, 0, 1, 0);
+ off = get_free(tdb, 0, 0, 1, TDB_USED_MAGIC, 0);
ok1(off == 0);
tdb_close(tdb);