#include "config.h"
#include <ccan/tdb2/tdb2.h>
#include <ccan/likely/likely.h>
#include <ccan/compiler/compiler.h>
#ifdef HAVE_BYTESWAP_H
#include <byteswap.h>
#endif

#define TDB_VERSION ((uint64_t)(0x26011967 + 7))
#define TDB_MAGIC ((uint64_t)0x1999)
#define TDB_FREE_MAGIC ((uint64_t)0xFE)
#define TDB_HASH_MAGIC (0xA1ABE11A01092008ULL)
#define TDB_RECOVERY_MAGIC (0xf53bc0e7ad124589ULL)
#define TDB_RECOVERY_INVALID_MAGIC (0x0ULL)

/* And 8 entries in each group, ie 8 groups per sublevel. */
#define TDB_HASH_GROUP_BITS 3

/* Extend file by least 100 times larger than needed. */
#define TDB_EXTENSION_FACTOR 100

/* We steal bits from the offsets to store hash info. */
#define TDB_OFF_HASH_GROUP_MASK ((1ULL << TDB_HASH_GROUP_BITS) - 1)
/* We steal this many upper bits, giving a maximum offset of 64 exabytes. */
#define TDB_OFF_UPPER_STEAL 8
#define TDB_OFF_UPPER_STEAL_EXTRA 7
/* The bit number where we store extra hash bits. */
#define TDB_OFF_HASH_EXTRA_BIT 57
#define TDB_OFF_UPPER_STEAL_SUBHASH_BIT 56

/* Convenience mask to get actual offset. */
#define TDB_OFF_MASK \
	(((1ULL << (64 - TDB_OFF_UPPER_STEAL)) - 1) - TDB_OFF_HASH_GROUP_MASK)

#define TDB_MIN_DATA_LEN \
	(sizeof(struct tdb_free_record) - sizeof(struct tdb_used_record))

/* Indicates this entry is not on an flist (can happen during coalescing) */
#define TDB_FLIST_NONE ((1ULL << TDB_OFF_UPPER_STEAL) - 1)

#if !HAVE_BSWAP_64
/* Fallback byte-swap for platforms without <byteswap.h>'s bswap_64:
 * reverses the byte order of a 64-bit value.  The original body was
 * empty, which is undefined behavior for a non-void function. */
static inline uint64_t bswap_64(uint64_t x)
{
	return (((x & 0x00000000000000FFULL) << 56)
		| ((x & 0x000000000000FF00ULL) << 40)
		| ((x & 0x0000000000FF0000ULL) << 24)
		| ((x & 0x00000000FF000000ULL) << 8)
		| ((x & 0x000000FF00000000ULL) >> 8)
		| ((x & 0x0000FF0000000000ULL) >> 24)
		| ((x & 0x00FF000000000000ULL) >> 40)
		| ((x & 0xFF00000000000000ULL) >> 56));
}
#endif
/* A free record, linked on a per-bucket doubly-linked free list. */
struct tdb_free_record {
	uint64_t magic_and_prev; /* TDB_OFF_UPPER_STEAL bits magic, then prev */
	uint64_t flist_and_len; /* Len not counting these two fields. */
	/* This is why the minimum record size is 8 bytes. */
	uint64_t next;
};
+static inline uint64_t frec_prev(const struct tdb_free_record *f)
+{
+ return f->magic_and_prev & ((1ULL << (64 - TDB_OFF_UPPER_STEAL)) - 1);
+}
+
static inline uint64_t frec_magic(const struct tdb_free_record *f)
{
- return f->magic_and_meta >> (64 - TDB_OFF_UPPER_STEAL);
+ return f->magic_and_prev >> (64 - TDB_OFF_UPPER_STEAL);
}
-static inline uint64_t frec_flist(const struct tdb_free_record *f)
+static inline uint64_t frec_len(const struct tdb_free_record *f)
{
- return f->magic_and_meta & ((1ULL << (64 - TDB_OFF_UPPER_STEAL)) - 1);
+ return f->flist_and_len & ((1ULL << (64 - TDB_OFF_UPPER_STEAL))-1);
+}
+
+static inline unsigned frec_flist(const struct tdb_free_record *f)
+{
+ return f->flist_and_len >> (64 - TDB_OFF_UPPER_STEAL);
}
struct tdb_recovery_record {
uint32_t flags;
/* Logging function */
- tdb_logfn_t log;
- void *log_priv;
+ tdb_logfn_t logfn;
+ void *log_private;
/* Hash function. */
tdb_hashfn_t khash;
/* What freelist are we using? */
uint64_t flist_off;
+ unsigned int flist;
/* IO methods: changes for transactions. */
const struct tdb_methods *methods;
/* Lock information */
struct tdb_lock_type allrecord_lock;
- uint64_t num_lockrecs;
+ size_t num_lockrecs;
struct tdb_lock_type *lockrecs;
+ struct tdb_attribute_stats *stats;
+
/* Single list of all TDBs, to avoid multiple opens. */
struct tdb_context *next;
dev_t device;
tdb_off_t off, tdb_len_t len_with_header);
/* Set up header for a used record. */
int set_used_header(struct tdb_context *tdb,
		    struct tdb_used_record *rec,
		    uint64_t keylen, uint64_t datalen,
		    uint64_t actuallen, unsigned hashlow);
/* Used by tdb_check to verify. */
unsigned int size_to_bucket(tdb_len_t data_len);
tdb_off_t bucket_off(tdb_off_t flist_off, unsigned bucket);
+/* Used by tdb_summary */
+size_t dead_space(struct tdb_context *tdb, tdb_off_t off);
+
/* io.c: */
/* Initialize tdb->methods. */
void tdb_io_init(struct tdb_context *tdb);

void tdb_munmap(struct tdb_context *tdb);
void tdb_mmap(struct tdb_context *tdb);

/* Either alloc a copy, or give direct access. Release frees or noop. */
const void *tdb_access_read(struct tdb_context *tdb,
			    tdb_off_t off, tdb_len_t len, bool convert);
void *tdb_access_write(struct tdb_context *tdb,
		       tdb_off_t off, tdb_len_t len, bool convert);

/* Is this pointer direct? (Otherwise it's malloced) */
bool is_direct(const struct tdb_context *tdb, const void *p);

/* Release result of tdb_access_read/write. */
void tdb_access_release(struct tdb_context *tdb, const void *p);
/* Commit result of tdb_access_write. */
/* NOTE(review): the comment above looks orphaned — a tdb_access_commit
 * declaration appears to be missing here; verify against the full header. */
int tdb_read_convert(struct tdb_context *tdb, tdb_off_t off,
		     void *rec, size_t len);

/* Adds a stat, if it's in range. */
void add_stat_(struct tdb_context *tdb, uint64_t *stat, size_t val);
#define add_stat(tdb, statname, val)					\
	do {								\
		if (unlikely((tdb)->stats))				\
			add_stat_((tdb), &(tdb)->stats->statname, (val)); \
	} while (0)
/* lock.c: */
void tdb_lock_init(struct tdb_context *tdb);
int tdb_transaction_recover(struct tdb_context *tdb);
bool tdb_needs_recovery(struct tdb_context *tdb);
+/* tdb.c: */
+void COLD tdb_logerr(struct tdb_context *tdb,
+ enum TDB_ERROR ecode,
+ enum tdb_debug_level level,
+ const char *fmt, ...);
+
#ifdef TDB_TRACE
void tdb_trace(struct tdb_context *tdb, const char *op);
void tdb_trace_seqnum(struct tdb_context *tdb, uint32_t seqnum, const char *op);