/* And 8 entries in each group, ie 8 groups per sublevel. */
#define TDB_HASH_GROUP_BITS 3
-/* Extend file by least 32 times larger than needed. */
-#define TDB_EXTENSION_FACTOR 32
+/* Extend file to at least 100 times larger than needed. */
+#define TDB_EXTENSION_FACTOR 100
/* We steal bits from the offsets to store hash info. */
#define TDB_OFF_HASH_GROUP_MASK ((1ULL << TDB_HASH_GROUP_BITS) - 1)
}
struct tdb_free_record {
- uint64_t magic_and_meta; /* TDB_OFF_UPPER_STEAL bits of magic */
- uint64_t data_len; /* Not counting these two fields. */
- /* This is why the minimum record size is 16 bytes. */
- uint64_t next, prev;
+ uint64_t magic_and_prev; /* TDB_OFF_UPPER_STEAL bits magic, then prev */
+ uint64_t flist_and_len; /* Len not counting these two fields. */
+ /* This is why the minimum record size is 8 bytes. */
+ uint64_t next;
};
+static inline uint64_t frec_prev(const struct tdb_free_record *f)
+{
+ return f->magic_and_prev & ((1ULL << (64 - TDB_OFF_UPPER_STEAL)) - 1);
+}
+
static inline uint64_t frec_magic(const struct tdb_free_record *f)
{
- return f->magic_and_meta >> (64 - TDB_OFF_UPPER_STEAL);
+ return f->magic_and_prev >> (64 - TDB_OFF_UPPER_STEAL);
}
-static inline uint64_t frec_flist(const struct tdb_free_record *f)
+static inline uint64_t frec_len(const struct tdb_free_record *f)
{
- return f->magic_and_meta & ((1ULL << (64 - TDB_OFF_UPPER_STEAL)) - 1);
+ return f->flist_and_len & ((1ULL << (64 - TDB_OFF_UPPER_STEAL))-1);
+}
+
+static inline unsigned frec_flist(const struct tdb_free_record *f)
+{
+ return f->flist_and_len >> (64 - TDB_OFF_UPPER_STEAL);
}
struct tdb_recovery_record {
/* What freelist are we using? */
uint64_t flist_off;
+ unsigned int flist;
/* IO methods: changes for transactions. */
const struct tdb_methods *methods;
uint64_t num_lockrecs;
struct tdb_lock_type *lockrecs;
+ struct tdb_attribute_stats *stats;
+
/* Single list of all TDBs, to avoid multiple opens. */
struct tdb_context *next;
dev_t device;
tdb_off_t off, tdb_len_t len_with_header);
/* Set up header for a used record. */
-int set_header(struct tdb_context *tdb,
- struct tdb_used_record *rec,
- uint64_t keylen, uint64_t datalen,
- uint64_t actuallen, unsigned hashlow);
+int set_used_header(struct tdb_context *tdb,
+ struct tdb_used_record *rec,
+ uint64_t keylen, uint64_t datalen,
+ uint64_t actuallen, unsigned hashlow);
/* Used by tdb_check to verify. */
unsigned int size_to_bucket(tdb_len_t data_len);
void tdb_munmap(struct tdb_context *tdb);
void tdb_mmap(struct tdb_context *tdb);
-/* Either make a copy into pad and return that, or return ptr into mmap.
- * Converts endian (ie. will use pad in that case). */
-void *tdb_get(struct tdb_context *tdb, tdb_off_t off, void *pad, size_t len);
-
/* Either alloc a copy, or give direct access. Release frees or noop. */
const void *tdb_access_read(struct tdb_context *tdb,
tdb_off_t off, tdb_len_t len, bool convert);
void *tdb_access_write(struct tdb_context *tdb,
tdb_off_t off, tdb_len_t len, bool convert);
+/* Is this pointer direct? (Otherwise it's malloced) */
+bool is_direct(const struct tdb_context *tdb, const void *p);
+
/* Release result of tdb_access_read/write. */
void tdb_access_release(struct tdb_context *tdb, const void *p);
/* Commit result of tdb_acces_write. */
int tdb_read_convert(struct tdb_context *tdb, tdb_off_t off,
void *rec, size_t len);
+/* Adds a stat, if it's in range. */
+void add_stat_(struct tdb_context *tdb, uint64_t *stat, size_t val);
+#define add_stat(tdb, statname, val) \
+ do { \
+ if (unlikely((tdb)->stats)) \
+ add_stat_((tdb), &(tdb)->stats->statname, (val)); \
+ } while (0)
/* lock.c: */
void tdb_lock_init(struct tdb_context *tdb);