#include "private.h"
#include <ccan/tdb2/tdb2.h>
-#include <ccan/hash/hash.h>
+#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>
#include <assert.h>
{
}
-/* We do a lot of work assuming our copy of the header volatile area
- * is uptodate, and usually it is. However, once we grab a lock, we have to
- * re-check it. */
-bool update_header(struct tdb_context *tdb)
-{
- struct tdb_header_volatile pad, *v;
-
- if (tdb->header_uptodate) {
- tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
- "warning: header uptodate already\n");
- }
-
- /* We could get a partial update if we're not holding any locks. */
- assert(tdb_has_locks(tdb));
-
- v = tdb_get(tdb, offsetof(struct tdb_header, v), &pad, sizeof(*v));
- if (!v) {
- /* On failure, imply we updated header so they retry. */
- return true;
- }
- tdb->header_uptodate = true;
- if (likely(memcmp(&tdb->header.v, v, sizeof(*v)) == 0)) {
- return false;
- }
- tdb->header.v = *v;
- return true;
-}
-
-static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
- void *arg)
-{
- return hash64_any(key, length, seed);
-}
-
-uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
-{
- return tdb->khash(ptr, len, tdb->header.hash_seed, tdb->hash_priv);
-}
-
static bool tdb_already_open(dev_t device, ino_t ino)
{
struct tdb_context *i;
fd = open("/dev/urandom", O_RDONLY);
if (fd >= 0) {
- if (read(fd, &ret, sizeof(ret)) == sizeof(ret)) {
+ if (tdb_read_all(fd, &ret, sizeof(ret))) {
tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
"tdb_open: random from /dev/urandom\n");
close(fd);
struct new_database {
struct tdb_header hdr;
- struct tdb_used_record hrec;
- tdb_off_t hash[1ULL << INITIAL_HASH_BITS];
- struct tdb_used_record frec;
- tdb_off_t free[INITIAL_FREE_BUCKETS + 1]; /* One overflow bucket */
+ /* Initial free zone. */
+ struct free_zone_header zhdr;
+ tdb_off_t free[BUCKETS_FOR_ZONE(INITIAL_ZONE_BITS) + 1];
+ struct tdb_free_record frec;
+ /* Rest up to 1 << INITIAL_ZONE_BITS is empty. */
+ char space[(1 << INITIAL_ZONE_BITS)
+ - sizeof(struct free_zone_header)
+ - sizeof(tdb_off_t) * (BUCKETS_FOR_ZONE(INITIAL_ZONE_BITS)+1)
+ - sizeof(struct tdb_free_record)];
+ uint8_t tailer;
+ /* Don't count final padding! */
};
/* initialise a new database */
-static int tdb_new_database(struct tdb_context *tdb)
+static int tdb_new_database(struct tdb_context *tdb, struct tdb_header *hdr)
{
/* We make it up in memory, then write it out if not internal */
struct new_database newdb;
+ unsigned int bucket, magic_len, dbsize;
+
+ /* Don't want any extra padding! */
+ dbsize = offsetof(struct new_database, tailer) + sizeof(newdb.tailer);
/* Fill in the header */
newdb.hdr.version = TDB_VERSION;
sizeof(newdb.hdr.hash_test),
newdb.hdr.hash_seed,
tdb->hash_priv);
+ memset(newdb.hdr.reserved, 0, sizeof(newdb.hdr.reserved));
+ /* Initial hashes are empty. */
+ memset(newdb.hdr.hashtable, 0, sizeof(newdb.hdr.hashtable));
- newdb.hdr.v.generation = 0;
-
- /* Free array has 1 zone, 10 buckets. All buckets empty. */
- newdb.hdr.v.num_zones = 1;
- newdb.hdr.v.zone_bits = INITIAL_ZONE_BITS;
- newdb.hdr.v.free_buckets = INITIAL_FREE_BUCKETS;
- newdb.hdr.v.free_off = offsetof(struct new_database, free);
- set_header(tdb, &newdb.frec, 0,
- sizeof(newdb.free), sizeof(newdb.free), 0);
+ /* Free is mostly empty... */
+ newdb.zhdr.zone_bits = INITIAL_ZONE_BITS;
memset(newdb.free, 0, sizeof(newdb.free));
- /* Initial hashes are empty. */
- newdb.hdr.v.hash_bits = INITIAL_HASH_BITS;
- newdb.hdr.v.hash_off = offsetof(struct new_database, hash);
- set_header(tdb, &newdb.hrec, 0,
- sizeof(newdb.hash), sizeof(newdb.hash), 0);
- memset(newdb.hash, 0, sizeof(newdb.hash));
+ /* Create the single free entry. */
+ newdb.frec.magic_and_meta = TDB_FREE_MAGIC | INITIAL_ZONE_BITS;
+ newdb.frec.data_len = (sizeof(newdb.frec)
+ - sizeof(struct tdb_used_record)
+ + sizeof(newdb.space));
+
+ /* Add it to the correct bucket. */
+ bucket = size_to_bucket(INITIAL_ZONE_BITS, newdb.frec.data_len);
+ newdb.free[bucket] = offsetof(struct new_database, frec);
+ newdb.frec.next = newdb.frec.prev = 0;
+
+ /* Clear free space to keep valgrind happy, and avoid leaking stack. */
+ memset(newdb.space, 0, sizeof(newdb.space));
+
+ /* Tailer contains maximum number of free_zone bits. */
+ newdb.tailer = INITIAL_ZONE_BITS;
+
+ /* Magic food */
+ memset(newdb.hdr.magic_food, 0, sizeof(newdb.hdr.magic_food));
+ strcpy(newdb.hdr.magic_food, TDB_MAGIC_FOOD);
+
+ /* This creates an endian-converted database, as if read from disk */
+ magic_len = sizeof(newdb.hdr.magic_food);
+ tdb_convert(tdb,
+ (char *)&newdb.hdr + magic_len,
+ offsetof(struct new_database, space) - magic_len);
+
+ *hdr = newdb.hdr;
if (tdb->flags & TDB_INTERNAL) {
- tdb->map_size = sizeof(newdb);
+ tdb->map_size = dbsize;
tdb->map_ptr = malloc(tdb->map_size);
if (!tdb->map_ptr) {
tdb->ecode = TDB_ERR_OOM;
return -1;
}
memcpy(tdb->map_ptr, &newdb, tdb->map_size);
- tdb->header = newdb.hdr;
- /* Convert the `ondisk' version if asked. */
- tdb_convert(tdb, tdb->map_ptr, sizeof(newdb));
return 0;
}
if (lseek(tdb->fd, 0, SEEK_SET) == -1)
if (ftruncate(tdb->fd, 0) == -1)
return -1;
- /* This creates an endian-converted header, as if read from disk */
- tdb->header = newdb.hdr;
- tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
-
- /* Don't endian-convert the magic food! */
- memset(newdb.hdr.magic_food, 0, sizeof(newdb.hdr.magic_food));
- strcpy(newdb.hdr.magic_food, TDB_MAGIC_FOOD);
-
- if (!tdb_pwrite_all(tdb->fd, &newdb, sizeof(newdb), 0)) {
+ if (!tdb_pwrite_all(tdb->fd, &newdb, dbsize, 0)) {
tdb->ecode = TDB_ERR_IO;
return -1;
}
int save_errno;
uint64_t hash_test;
unsigned v;
+ struct tdb_header hdr;
- if (!(tdb = (struct tdb_context *)calloc(1, sizeof *tdb))) {
+ tdb = malloc(sizeof(*tdb));
+ if (!tdb) {
/* Can't log this */
errno = ENOMEM;
goto fail;
}
- tdb->fd = -1;
tdb->name = NULL;
tdb->map_ptr = NULL;
+ tdb->direct_access = 0;
+ tdb->fd = -1;
+ tdb->map_size = sizeof(struct tdb_header);
+ tdb->ecode = TDB_SUCCESS;
tdb->flags = tdb_flags;
tdb->log = null_log_fn;
tdb->log_priv = NULL;
- tdb->khash = jenkins_hash;
- tdb->hash_priv = NULL;
+ tdb->transaction = NULL;
+ tdb_hash_init(tdb);
+ /* last_zone will be set below. */
+ tdb_io_init(tdb);
+ tdb_lock_init(tdb);
- /* FIXME */
- if (attr) {
- tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
- "tdb_open: attributes not yet supported\n");
- errno = EINVAL;
- goto fail;
+ while (attr) {
+ switch (attr->base.attr) {
+ case TDB_ATTRIBUTE_LOG:
+ tdb->log = attr->log.log_fn;
+ tdb->log_priv = attr->log.log_private;
+ break;
+ case TDB_ATTRIBUTE_HASH:
+ tdb->khash = attr->hash.hash_fn;
+ tdb->hash_priv = attr->hash.hash_private;
+ break;
+ default:
+ tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
+ "tdb_open: unknown attribute type %u\n",
+ attr->base.attr);
+ errno = EINVAL;
+ goto fail;
+ }
+ attr = attr->base.next;
}
if ((open_flags & O_ACCMODE) == O_WRONLY) {
}
if ((open_flags & O_ACCMODE) == O_RDONLY) {
- tdb->read_only = 1;
+ tdb->read_only = true;
/* read only databases don't do locking */
tdb->flags |= TDB_NOLOCK;
- }
+ } else
+ tdb->read_only = false;
- /* internal databases don't mmap or lock */
+ /* internal databases don't need any of the rest. */
if (tdb->flags & TDB_INTERNAL) {
tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
- if (tdb_new_database(tdb) != 0) {
+ if (tdb_new_database(tdb, &hdr) != 0) {
tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
"tdb_open: tdb_new_database failed!");
goto fail;
}
- TEST_IT(tdb->flags & TDB_CONVERT);
- goto internal;
+ tdb_convert(tdb, &hdr.hash_seed, sizeof(hdr.hash_seed));
+ tdb->hash_seed = hdr.hash_seed;
+ tdb_zone_init(tdb);
+ return tdb;
}
if ((tdb->fd = open(name, open_flags, mode)) == -1) {
goto fail; /* errno set by tdb_brlock */
}
- errno = 0;
- if (read(tdb->fd, &tdb->header, sizeof(tdb->header)) != sizeof(tdb->header)
- || strcmp(tdb->header.magic_food, TDB_MAGIC_FOOD) != 0) {
- if (!(open_flags & O_CREAT) || tdb_new_database(tdb) == -1) {
+ if (!tdb_pread_all(tdb->fd, &hdr, sizeof(hdr), 0)
+ || strcmp(hdr.magic_food, TDB_MAGIC_FOOD) != 0) {
+ if (!(open_flags & O_CREAT) || tdb_new_database(tdb, &hdr) == -1) {
if (errno == 0) {
errno = EIO; /* ie bad format or something */
}
goto fail;
}
- } else if (tdb->header.version != TDB_VERSION) {
- if (tdb->header.version == bswap_64(TDB_VERSION))
+ } else if (hdr.version != TDB_VERSION) {
+ if (hdr.version == bswap_64(TDB_VERSION))
tdb->flags |= TDB_CONVERT;
else {
/* wrong version */
tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
"tdb_open: %s is unknown version 0x%llx\n",
- name, (long long)tdb->header.version);
+ name, (long long)hdr.version);
errno = EIO;
goto fail;
}
}
- tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
+ tdb_convert(tdb, &hdr, sizeof(hdr));
+ tdb->hash_seed = hdr.hash_seed;
hash_test = TDB_HASH_MAGIC;
- hash_test = tdb->khash(&hash_test, sizeof(hash_test),
- tdb->header.hash_seed, tdb->hash_priv);
- if (tdb->header.hash_test != hash_test) {
+ hash_test = tdb_hash(tdb, &hash_test, sizeof(hash_test));
+ if (hdr.hash_test != hash_test) {
/* wrong hash variant */
tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
"tdb_open: %s uses a different hash function\n",
goto fail;
}
- tdb->map_size = st.st_size;
tdb->device = st.st_dev;
tdb->inode = st.st_ino;
- tdb_io_init(tdb);
- tdb_mmap(tdb);
-
- internal:
- /* Internal (memory-only) databases skip all the code above to
- * do with disk files, and resume here by releasing their
- * open lock and hooking into the active list. */
tdb_unlock_open(tdb);
- tdb->last_zone = random_free_zone(tdb);
+
+ /* This makes sure we have current map_size and mmap. */
+ tdb->methods->oob(tdb, tdb->map_size + 1, true);
+
+ /* Now we can pick a random free zone to start from. */
+ if (tdb_zone_init(tdb) == -1)
+ goto fail;
+
tdb->next = tdbs;
tdbs = tdb;
return tdb;
return NULL;
}
-static int tdb_key_compare(TDB_DATA key, TDB_DATA data, void *private_data)
-{
- return memcmp(data.dptr, key.dptr, data.dsize) == 0;
-}
-
-static void unlock_lists(struct tdb_context *tdb,
- uint64_t start, uint64_t end, int ltype)
-{
- do {
- tdb_unlock_list(tdb, start, ltype);
- start = (start + ((1ULL << tdb->header.v.hash_bits) - 1))
- & ((1ULL << tdb->header.v.hash_bits) - 1);
- } while (start != end);
-}
-
-/* FIXME: Return header copy? */
-/* Returns -1 or offset of entry (0 if not found).
- * Locks hash entried from *start to *end (where the entry was found). */
-static tdb_off_t find_bucket_and_lock(struct tdb_context *tdb,
- const struct tdb_data *key,
- uint64_t hash,
- uint64_t *start,
- uint64_t *end,
- uint64_t *room,
- int ltype)
-{
- uint64_t hextra;
- tdb_off_t off;
-
- /* hash_bits might be out of date... */
-again:
- *start = *end = hash & ((1ULL << tdb->header.v.hash_bits) - 1);
- hextra = hash >> tdb->header.v.hash_bits;
-
- /* FIXME: can we avoid locks for some fast paths? */
- if (tdb_lock_list(tdb, *end, ltype, TDB_LOCK_WAIT) == -1)
- return TDB_OFF_ERR;
-
- /* We only need to check this for first lock. */
- if (unlikely(update_header(tdb))) {
- tdb_unlock_list(tdb, *end, ltype);
- goto again;
- }
-
- while ((off = tdb_read_off(tdb, tdb->header.v.hash_off
- + *end * sizeof(tdb_off_t)))
- != TDB_OFF_ERR) {
- struct tdb_used_record pad, *r;
- uint64_t keylen, next;
-
- /* Didn't find it? */
- if (!off)
- return 0;
-
-#if 0 /* FIXME: Check other bits. */
- unsigned int bits, bitmask, hoffextra;
- /* Bottom three bits show how many extra hash bits. */
- bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1;
- bitmask = (1 << bits)-1;
- hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask);
- if ((hextra & bitmask) != hoffextra)
- goto lock_next;
-#endif
-
- r = tdb_get(tdb, off, &pad, sizeof(*r));
- if (!r)
- goto unlock_err;
-
- if (rec_magic(r) != TDB_MAGIC) {
- tdb->ecode = TDB_ERR_CORRUPT;
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "find_bucket_and_lock: bad magic 0x%llx"
- " at offset %llu!\n",
- (long long)rec_magic(r), (long long)off);
- goto unlock_err;
- }
-
- /* FIXME: check extra bits in header! */
- keylen = rec_key_length(r);
- if (keylen != key->dsize)
- goto lock_next;
-
- switch (tdb_parse_data(tdb, *key, off + sizeof(*r), key->dsize,
- tdb_key_compare, NULL)) {
- case 1:
- /* Match! */
- *room = rec_data_length(r) + rec_extra_padding(r);
- return off >> TDB_EXTRA_HASHBITS_NUM;
- case 0:
- break;
- default:
- goto unlock_err;
- }
-
- lock_next:
- /* Lock next bucket. */
- /* FIXME: We can deadlock if this wraps! */
- next = (*end + 1) & ((1ULL << tdb->header.v.hash_bits) - 1);
- if (next == *start) {
- tdb->ecode = TDB_ERR_CORRUPT;
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "find_bucket_and_lock: full hash table!\n");
- goto unlock_err;
- }
- if (tdb_lock_list(tdb, next, ltype, TDB_LOCK_WAIT) == -1)
- goto unlock_err;
- *end = next;
- }
-
-unlock_err:
- TEST_IT(*end < *start);
- unlock_lists(tdb, *start, *end, ltype);
- return TDB_OFF_ERR;
-}
-
+/* FIXME: modify, don't rewrite! */
static int update_rec_hdr(struct tdb_context *tdb,
tdb_off_t off,
tdb_len_t keylen,
tdb_len_t datalen,
- tdb_len_t room,
+ struct tdb_used_record *rec,
uint64_t h)
{
- struct tdb_used_record rec;
+ uint64_t dataroom = rec_data_length(rec) + rec_extra_padding(rec);
- if (set_header(tdb, &rec, keylen, datalen, room - datalen, h))
+ if (set_header(tdb, rec, keylen, datalen, keylen + dataroom, h,
+ rec_zone_bits(rec)))
return -1;
- return tdb_write_convert(tdb, off, &rec, sizeof(rec));
+ return tdb_write_convert(tdb, off, rec, sizeof(*rec));
}
-/* If we fail, others will try after us. */
-static void enlarge_hash(struct tdb_context *tdb)
+/* Returns -1 on error, 0 on OK */
+static int replace_data(struct tdb_context *tdb,
+ struct hash_info *h,
+ struct tdb_data key, struct tdb_data dbuf,
+ tdb_off_t old_off, tdb_len_t old_room,
+ unsigned old_zone,
+ bool growing)
{
- tdb_off_t newoff, i;
- uint64_t h, num = 1ULL << tdb->header.v.hash_bits;
- struct tdb_used_record pad, *r;
-
- /* FIXME: We should do this without holding locks throughout. */
- if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
- return;
-
- if (unlikely(update_header(tdb))) {
- /* Someone else enlarged for us? Nothing to do. */
- if ((1ULL << tdb->header.v.hash_bits) != num)
- goto unlock;
- }
+ tdb_off_t new_off;
- newoff = alloc(tdb, 0, num * 2, 0, false);
- if (unlikely(newoff == TDB_OFF_ERR))
- goto unlock;
- if (unlikely(newoff == 0)) {
- if (tdb_expand(tdb, 0, num * 2, false) == -1)
- goto unlock;
- newoff = alloc(tdb, 0, num * 2, 0, false);
- if (newoff == TDB_OFF_ERR || newoff == 0)
- goto unlock;
- }
+ /* Allocate a new record. */
+ new_off = alloc(tdb, key.dsize, dbuf.dsize, h->h, growing);
+ if (unlikely(new_off == TDB_OFF_ERR))
+ return -1;
- /* FIXME: If the space before is empty, we know this is in its ideal
- * location. We can steal a bit from the pointer to avoid rehash. */
- for (i = tdb_find_nonzero_off(tdb, tdb->header.v.hash_off, num);
- i < num;
- i += tdb_find_nonzero_off(tdb, tdb->header.v.hash_off
- + i*sizeof(tdb_off_t), num - i)) {
- tdb_off_t off;
- off = tdb_read_off(tdb, tdb->header.v.hash_off
- + i*sizeof(tdb_off_t));
- if (unlikely(off == TDB_OFF_ERR))
- goto unlock;
- if (unlikely(!off)) {
- tdb->ecode = TDB_ERR_CORRUPT;
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "find_bucket_and_lock: zero hash bucket!\n");
- goto unlock;
- }
- h = hash_record(tdb, off);
- /* FIXME: Encode extra hash bits! */
- if (tdb_write_off(tdb, newoff
- + (h & ((num * 2) - 1)) * sizeof(uint64_t),
- off) == -1)
- goto unlock;
+ /* We didn't like the existing one: remove it. */
+ if (old_off) {
+ add_free_record(tdb, old_zone, old_off,
+ sizeof(struct tdb_used_record)
+ + key.dsize + old_room);
+ if (replace_in_hash(tdb, h, new_off) == -1)
+ return -1;
+ } else {
+ if (add_to_hash(tdb, h, new_off) == -1)
+ return -1;
}
- /* Free up old hash. */
- r = tdb_get(tdb, tdb->header.v.hash_off, &pad, sizeof(*r));
- if (!r)
- goto unlock;
- add_free_record(tdb, tdb->header.v.hash_off,
- rec_data_length(r) + rec_extra_padding(r));
-
- /* Now we write the modified header. */
- tdb->header.v.generation++;
- tdb->header.v.hash_bits++;
- tdb->header.v.hash_off = newoff;
- tdb_write_convert(tdb, offsetof(struct tdb_header, v),
- &tdb->header.v, sizeof(tdb->header.v));
-unlock:
- tdb_allrecord_unlock(tdb, F_WRLCK);
+ new_off += sizeof(struct tdb_used_record);
+ if (tdb->methods->write(tdb, new_off, key.dptr, key.dsize) == -1)
+ return -1;
+
+ new_off += key.dsize;
+ if (tdb->methods->write(tdb, new_off, dbuf.dptr, dbuf.dsize) == -1)
+ return -1;
+
+ /* FIXME: tdb_increment_seqnum(tdb); */
+ return 0;
}
int tdb_store(struct tdb_context *tdb,
struct tdb_data key, struct tdb_data dbuf, int flag)
{
- tdb_off_t new_off, off, start, end, room;
- uint64_t h;
- bool growing = false;
+ struct hash_info h;
+ tdb_off_t off;
+ tdb_len_t old_room = 0;
+ struct tdb_used_record rec;
+ int ret;
- h = tdb_hash(tdb, key.dptr, key.dsize);
- off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_WRLCK);
- if (off == TDB_OFF_ERR)
+ off = find_and_lock(tdb, key, F_WRLCK, &h, &rec);
+ if (unlikely(off == TDB_OFF_ERR))
return -1;
/* Now we have lock on this hash bucket. */
}
} else {
if (off) {
- if (room >= key.dsize + dbuf.dsize) {
- new_off = off;
+ old_room = rec_data_length(&rec)
+ + rec_extra_padding(&rec);
+ if (old_room >= dbuf.dsize) {
+ /* Can modify in-place. Easy! */
if (update_rec_hdr(tdb, off,
key.dsize, dbuf.dsize,
- room, h))
+ &rec, h.h))
goto fail;
- goto write;
+ if (tdb->methods->write(tdb, off + sizeof(rec)
+ + key.dsize,
+ dbuf.dptr, dbuf.dsize))
+ goto fail;
+ tdb_unlock_hashes(tdb, h.hlock_start,
+ h.hlock_range, F_WRLCK);
+ return 0;
}
/* FIXME: See if right record is free? */
- /* Hint to allocator that we've realloced. */
- growing = true;
} else {
if (flag == TDB_MODIFY) {
/* if the record doesn't exist and we
}
}
- /* Allocate a new record. */
- new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
- if (new_off == 0) {
- unlock_lists(tdb, start, end, F_WRLCK);
- /* Expand, then try again... */
- if (tdb_expand(tdb, key.dsize, dbuf.dsize, growing) == -1)
- return -1;
- return tdb_store(tdb, key, dbuf, flag);
- }
+ /* If we didn't use the old record, this implies we're growing. */
+ ret = replace_data(tdb, &h, key, dbuf, off, old_room,
+ rec_zone_bits(&rec), off != 0);
+ tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
+ return ret;
+
+fail:
+ tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
+ return -1;
+}
+
+int tdb_append(struct tdb_context *tdb,
+ struct tdb_data key, struct tdb_data dbuf)
+{
+ struct hash_info h;
+ tdb_off_t off;
+ struct tdb_used_record rec;
+ tdb_len_t old_room = 0, old_dlen;
+ unsigned char *newdata;
+ struct tdb_data new_dbuf;
+ int ret;
+
+ off = find_and_lock(tdb, key, F_WRLCK, &h, &rec);
+ if (unlikely(off == TDB_OFF_ERR))
+ return -1;
- /* We didn't like the existing one: remove it. */
if (off) {
- add_free_record(tdb, off, sizeof(struct tdb_used_record)
- + key.dsize + room);
- }
+ old_dlen = rec_data_length(&rec);
+ old_room = old_dlen + rec_extra_padding(&rec);
-write:
- off = tdb->header.v.hash_off + end * sizeof(tdb_off_t);
- /* FIXME: Encode extra hash bits! */
- if (tdb_write_off(tdb, off, new_off) == -1)
- goto fail;
+ /* Fast path: can append in place. */
+ if (rec_extra_padding(&rec) >= dbuf.dsize) {
+ if (update_rec_hdr(tdb, off, key.dsize,
+ old_dlen + dbuf.dsize, &rec, h.h))
+ goto fail;
- off = new_off + sizeof(struct tdb_used_record);
- if (tdb->methods->write(tdb, off, key.dptr, key.dsize) == -1)
- goto fail;
- off += key.dsize;
- if (tdb->methods->write(tdb, off, dbuf.dptr, dbuf.dsize) == -1)
- goto fail;
+ off += sizeof(rec) + key.dsize + old_dlen;
+ if (tdb->methods->write(tdb, off, dbuf.dptr,
+ dbuf.dsize) == -1)
+ goto fail;
- /* FIXME: tdb_increment_seqnum(tdb); */
- unlock_lists(tdb, start, end, F_WRLCK);
+ /* FIXME: tdb_increment_seqnum(tdb); */
+ tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range,
+ F_WRLCK);
+ return 0;
+ }
+ /* FIXME: Check right record free? */
+
+ /* Slow path. */
+ newdata = malloc(key.dsize + old_dlen + dbuf.dsize);
+ if (!newdata) {
+ tdb->ecode = TDB_ERR_OOM;
+ tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+ "tdb_append: cannot allocate %llu bytes!\n",
+ (long long)key.dsize + old_dlen + dbuf.dsize);
+ goto fail;
+ }
+ if (tdb->methods->read(tdb, off + sizeof(rec) + key.dsize,
+ newdata, old_dlen) != 0) {
+ free(newdata);
+ goto fail;
+ }
+ memcpy(newdata + old_dlen, dbuf.dptr, dbuf.dsize);
+ new_dbuf.dptr = newdata;
+ new_dbuf.dsize = old_dlen + dbuf.dsize;
+ } else {
+ newdata = NULL;
+ new_dbuf = dbuf;
+ }
- /* By simple trial and error, this roughly approximates a 60%
- * full measure. */
- if (unlikely(end - start > 4 * tdb->header.v.hash_bits - 32))
- enlarge_hash(tdb);
+ /* If they're using tdb_append(), it implies they're growing the record. */
+ ret = replace_data(tdb, &h, key, new_dbuf, off,
+ old_room, rec_zone_bits(&rec), true);
+ tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
+ free(newdata);
- return 0;
+ return ret;
fail:
- unlock_lists(tdb, start, end, F_WRLCK);
+ tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
return -1;
}
struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
{
- tdb_off_t off, start, end, room;
- uint64_t h;
- struct tdb_used_record pad, *r;
+ tdb_off_t off;
+ struct tdb_used_record rec;
+ struct hash_info h;
struct tdb_data ret;
- h = tdb_hash(tdb, key.dptr, key.dsize);
- off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_RDLCK);
- if (off == TDB_OFF_ERR)
+ off = find_and_lock(tdb, key, F_RDLCK, &h, &rec);
+ if (unlikely(off == TDB_OFF_ERR))
return tdb_null;
if (!off) {
- unlock_lists(tdb, start, end, F_RDLCK);
- tdb->ecode = TDB_SUCCESS;
- return tdb_null;
- }
-
- r = tdb_get(tdb, off, &pad, sizeof(*r));
- if (!r) {
- unlock_lists(tdb, start, end, F_RDLCK);
- return tdb_null;
+ tdb->ecode = TDB_ERR_NOEXIST;
+ ret = tdb_null;
+ } else {
+ ret.dsize = rec_data_length(&rec);
+ ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
+ ret.dsize);
}
- ret.dsize = rec_data_length(r);
- ret.dptr = tdb_alloc_read(tdb, off + sizeof(*r) + key.dsize,
- ret.dsize);
- unlock_lists(tdb, start, end, F_RDLCK);
+ tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_RDLCK);
return ret;
}
-static int hash_add(struct tdb_context *tdb, uint64_t h, tdb_off_t off)
+int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
{
- tdb_off_t i, hoff, len, num;
-
- i = (h & ((1ULL << tdb->header.v.hash_bits) - 1));
- hoff = tdb->header.v.hash_off + i * sizeof(tdb_off_t);
- len = (1ULL << tdb->header.v.hash_bits) - i;
-
- /* Look for next space. */
- num = tdb_find_zero_off(tdb, hoff, len);
- if (unlikely(num == len)) {
- hoff = tdb->header.v.hash_off;
- len = (1ULL << tdb->header.v.hash_bits);
- num = tdb_find_zero_off(tdb, hoff, len);
- if (i == len)
- return -1;
- }
- /* FIXME: Encode extra hash bits! */
- return tdb_write_off(tdb, hoff + num * sizeof(tdb_off_t), off);
-}
+ tdb_off_t off;
+ struct tdb_used_record rec;
+ struct hash_info h;
-static int unlink_used_record(struct tdb_context *tdb, tdb_off_t chain,
- uint64_t *extra_locks)
-{
- tdb_off_t num, len, i, hoff;
-
- /* FIXME: Maybe lock more in search? Maybe don't lock if scan
- * finds none? */
-again:
- len = (1ULL << tdb->header.v.hash_bits) - (chain + 1);
- hoff = tdb->header.v.hash_off + (chain + 1) * sizeof(tdb_off_t);
- num = tdb_find_zero_off(tdb, hoff, len);
-
- /* We want to lock the zero entry, too. In the wrap case,
- * this locks one extra. That's harmless. */
- num++;
-
- for (i = chain + 1; i < chain + 1 + num; i++) {
- if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_WAIT) == -1) {
- if (i != chain + 1)
- unlock_lists(tdb, chain + 1, i-1, F_WRLCK);
- return -1;
- }
- }
+ off = find_and_lock(tdb, key, F_WRLCK, &h, &rec);
+ if (unlikely(off == TDB_OFF_ERR))
+ return -1;
- /* The wrap case: we need those locks out of order! */
- if (unlikely(num == len + 1)) {
- *extra_locks = tdb_find_zero_off(tdb, tdb->header.v.hash_off,
- 1ULL << tdb->header.v.hash_bits);
- (*extra_locks)++;
- for (i = 0; i < *extra_locks; i++) {
- if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_NOWAIT)) {
- /* Failed. Caller must lock in order. */
- if (i)
- unlock_lists(tdb, 0, i-1, F_WRLCK);
- unlock_lists(tdb, chain + 1, chain + num,
- F_WRLCK);
- return 1;
- }
- }
- num += *extra_locks;
+ if (!off) {
+ tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
+ tdb->ecode = TDB_ERR_NOEXIST;
+ return -1;
}
- /* Now we have the locks, be certain that offset is still 0! */
- hoff = tdb->header.v.hash_off
- + (((chain + num) * sizeof(tdb_off_t))
- & ((1ULL << tdb->header.v.hash_bits) - 1));
-
- if (unlikely(tdb_read_off(tdb, hoff) != 0)) {
- unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);
- goto again;
- }
+ if (delete_from_hash(tdb, &h) == -1)
+ goto unlock_err;
- /* OK, all locked. Unlink first one. */
- hoff = tdb->header.v.hash_off + chain * sizeof(tdb_off_t);
- if (tdb_write_off(tdb, hoff, 0) == -1)
+ /* Free the deleted entry. */
+ if (add_free_record(tdb, rec_zone_bits(&rec), off,
+ sizeof(struct tdb_used_record)
+ + rec_key_length(&rec)
+ + rec_data_length(&rec)
+ + rec_extra_padding(&rec)) != 0)
goto unlock_err;
- /* Rehash the rest. */
- for (i = 1; i < num; i++) {
- tdb_off_t off;
- uint64_t h;
-
- hoff = tdb->header.v.hash_off
- + (((chain + i) * sizeof(tdb_off_t))
- & ((1ULL << tdb->header.v.hash_bits) - 1));
- off = tdb_read_off(tdb, hoff);
- if (unlikely(off == TDB_OFF_ERR))
- goto unlock_err;
-
- /* Maybe use a bit to indicate it is in ideal place? */
- h = hash_record(tdb, off);
- /* Is it happy where it is? */
- if ((h & ((1ULL << tdb->header.v.hash_bits)-1)) == (chain + i))
- continue;
-
- /* Remove it. */
- if (tdb_write_off(tdb, hoff, 0) == -1)
- goto unlock_err;
-
- /* Rehash it. */
- if (hash_add(tdb, h, off) == -1)
- goto unlock_err;
- }
- unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);
+ tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
return 0;
unlock_err:
- unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);
+ tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
return -1;
}
-int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
-{
- tdb_off_t off, start, end, room, extra_locks = 0;
- uint64_t h;
- int ret;
-
- h = tdb_hash(tdb, key.dptr, key.dsize);
- off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_WRLCK);
- if (off == TDB_OFF_ERR)
- return -1;
-
- if (off == 0) {
- unlock_lists(tdb, start, end, F_WRLCK);
- tdb->ecode = TDB_ERR_NOEXIST;
- return -1;
- }
-
- ret = unlink_used_record(tdb, end, &extra_locks);
- if (unlikely(ret == 1)) {
- unsigned int i;
-
- unlock_lists(tdb, start, end, F_WRLCK);
-
- /* We need extra locks at the start. */
- for (i = 0; i < extra_locks; i++) {
- if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_WAIT)) {
- if (i)
- unlock_lists(tdb, 0, i-1, F_WRLCK);
- return -1;
- }
- }
- /* Try again now we're holding more locks. */
- ret = tdb_delete(tdb, key);
- unlock_lists(tdb, 0, i, F_WRLCK);
- return ret;
- }
- unlock_lists(tdb, start, end, F_WRLCK);
- return ret;
-}
-
int tdb_close(struct tdb_context *tdb)
{
struct tdb_context **i;
return ret;
}
+
+enum TDB_ERROR tdb_error(struct tdb_context *tdb)
+{
+ return tdb->ecode;
+}