return ret;
}
-struct new_database {
+struct new_db_head {
struct tdb_header hdr;
+ struct free_zone_header zhdr;
+ tdb_off_t free[BUCKETS_FOR_ZONE(INITIAL_ZONE_BITS) + 1]; /* One overflow bucket */
struct tdb_used_record hrec;
tdb_off_t hash[1ULL << INITIAL_HASH_BITS];
- struct tdb_used_record frec;
- tdb_off_t free[INITIAL_FREE_BUCKETS + 1]; /* One overflow bucket */
+ struct tdb_free_record frec;
+};
+
+struct new_database {
+ struct new_db_head h;
+ /* Rest up to 1 << INITIAL_ZONE_BITS is empty. */
+ char space[(1 << INITIAL_ZONE_BITS)
+ - (sizeof(struct new_db_head) - sizeof(struct tdb_header))];
+ uint8_t tailer;
+ /* Don't count final padding! */
};
/* initialise a new database */
{
/* We make it up in memory, then write it out if not internal */
struct new_database newdb;
- unsigned int magic_off = offsetof(struct tdb_header, magic_food);
+ unsigned int bucket, magic_off, dbsize;
- /* Fill in the header */
- newdb.hdr.version = TDB_VERSION;
- newdb.hdr.hash_seed = random_number(tdb);
- newdb.hdr.hash_test = TDB_HASH_MAGIC;
- newdb.hdr.hash_test = tdb->khash(&newdb.hdr.hash_test,
- sizeof(newdb.hdr.hash_test),
- newdb.hdr.hash_seed,
- tdb->hash_priv);
-
- newdb.hdr.v.generation = 0;
-
- /* The initial zone must cover the initial database size! */
- BUILD_ASSERT((1ULL << INITIAL_ZONE_BITS) >= sizeof(newdb));
-
- /* Free array has 1 zone, 10 buckets. All buckets empty. */
- newdb.hdr.v.num_zones = 1;
- newdb.hdr.v.zone_bits = INITIAL_ZONE_BITS;
- newdb.hdr.v.free_buckets = INITIAL_FREE_BUCKETS;
- newdb.hdr.v.free_off = offsetof(struct new_database, free);
- set_header(tdb, &newdb.frec, 0,
- sizeof(newdb.free), sizeof(newdb.free), 0);
- memset(newdb.free, 0, sizeof(newdb.free));
+ /* Don't want any extra padding! */
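+ /* (sizeof(newdb) would count any padding after the tailer.) */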
+ dbsize = offsetof(struct new_database, tailer) + sizeof(newdb.tailer);
+ /* Fill in the header */
+ newdb.h.hdr.version = TDB_VERSION;
+ newdb.h.hdr.hash_seed = random_number(tdb);
+ newdb.h.hdr.hash_test = TDB_HASH_MAGIC;
+ newdb.h.hdr.hash_test = tdb->khash(&newdb.h.hdr.hash_test,
+ sizeof(newdb.h.hdr.hash_test),
+ newdb.h.hdr.hash_seed,
+ tdb->hash_priv);
+ memset(newdb.h.hdr.reserved, 0, sizeof(newdb.h.hdr.reserved));
+ newdb.h.hdr.v.generation = 0;
/* Initial hashes are empty. */
- newdb.hdr.v.hash_bits = INITIAL_HASH_BITS;
- newdb.hdr.v.hash_off = offsetof(struct new_database, hash);
- set_header(tdb, &newdb.hrec, 0,
- sizeof(newdb.hash), sizeof(newdb.hash), 0);
- memset(newdb.hash, 0, sizeof(newdb.hash));
+ newdb.h.hdr.v.hash_bits = INITIAL_HASH_BITS;
+ newdb.h.hdr.v.hash_off = offsetof(struct new_database, h.hash);
+ set_header(tdb, &newdb.h.hrec, 0,
+ sizeof(newdb.h.hash), sizeof(newdb.h.hash), 0,
+ INITIAL_ZONE_BITS);
+ memset(newdb.h.hash, 0, sizeof(newdb.h.hash));
+
+ /* Create the single free entry. */
+ newdb.h.frec.magic_and_meta = TDB_FREE_MAGIC | INITIAL_ZONE_BITS;
+ newdb.h.frec.data_len = (sizeof(newdb.h.frec)
+ - sizeof(struct tdb_used_record)
+ + sizeof(newdb.space));
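+ /* ie. everything from the end of frec's header up to the tailer. */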
+
+ /* Free is mostly empty... */
+ newdb.h.zhdr.zone_bits = INITIAL_ZONE_BITS;
+ memset(newdb.h.free, 0, sizeof(newdb.h.free));
+
+ /* ... except for this one bucket. */
+ bucket = size_to_bucket(INITIAL_ZONE_BITS, newdb.h.frec.data_len);
+ newdb.h.free[bucket] = offsetof(struct new_database, h.frec);
+ newdb.h.frec.next = newdb.h.frec.prev = 0;
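+ /* It's the only record in the list, so next and prev are 0. */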
+
+ /* Clear free space to keep valgrind happy, and avoid leaking stack. */
+ memset(newdb.space, 0, sizeof(newdb.space));
+
+ /* Tailer contains maximum number of free_zone bits. */
+ newdb.tailer = INITIAL_ZONE_BITS;
/* Magic food */
- memset(newdb.hdr.magic_food, 0, sizeof(newdb.hdr.magic_food));
- strcpy(newdb.hdr.magic_food, TDB_MAGIC_FOOD);
+ memset(newdb.h.hdr.magic_food, 0, sizeof(newdb.h.hdr.magic_food));
+ strcpy(newdb.h.hdr.magic_food, TDB_MAGIC_FOOD);
/* This creates an endian-converted database, as if read from disk */
+ magic_off = offsetof(struct tdb_header, magic_food);
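+ /* (The tailer is a single byte, so it needs no conversion.) */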
tdb_convert(tdb,
- (char *)&newdb.hdr + magic_off,
- sizeof(newdb) - magic_off);
+ (char *)&newdb.h.hdr + magic_off,
+ dbsize - 1 - magic_off);
- tdb->header = newdb.hdr;
+ tdb->header = newdb.h.hdr;
if (tdb->flags & TDB_INTERNAL) {
- tdb->map_size = sizeof(newdb);
+ tdb->map_size = dbsize;
tdb->map_ptr = malloc(tdb->map_size);
if (!tdb->map_ptr) {
tdb->ecode = TDB_ERR_OOM;
if (ftruncate(tdb->fd, 0) == -1)
return -1;
- if (!tdb_pwrite_all(tdb->fd, &newdb, sizeof(newdb), 0)) {
+ if (!tdb_pwrite_all(tdb->fd, &newdb, dbsize, 0)) {
tdb->ecode = TDB_ERR_IO;
return -1;
}
tdb->name = NULL;
tdb->map_ptr = NULL;
tdb->fd = -1;
- /* map_size will be set below. */
+ tdb->map_size = sizeof(struct tdb_header);
tdb->ecode = TDB_SUCCESS;
/* header will be read in below. */
tdb->header_uptodate = false;
tdb_io_init(tdb);
tdb_lock_init(tdb);
- /* FIXME */
- if (attr) {
- tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
- "tdb_open: attributes not yet supported\n");
- errno = EINVAL;
- goto fail;
+ while (attr) {
+ switch (attr->base.attr) {
+ case TDB_ATTRIBUTE_LOG:
+ tdb->log = attr->log.log_fn;
+ tdb->log_priv = attr->log.log_private;
+ break;
+ case TDB_ATTRIBUTE_HASH:
+ tdb->khash = attr->hash.hash_fn;
+ tdb->hash_priv = attr->hash.hash_private;
+ break;
+ default:
+ tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
+ "tdb_open: unknown attribute type %u\n",
+ attr->base.attr);
+ errno = EINVAL;
+ goto fail;
+ }
+ attr = attr->base.next;
}
if ((open_flags & O_ACCMODE) == O_WRONLY) {
}
TEST_IT(tdb->flags & TDB_CONVERT);
tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
- /* Zones don't matter for internal db. */
- tdb->last_zone = 0;
+ tdb_zone_init(tdb);
return tdb;
}
goto fail;
}
- tdb->map_size = st.st_size;
tdb->device = st.st_dev;
tdb->inode = st.st_ino;
- tdb_mmap(tdb);
tdb_unlock_open(tdb);
- tdb_zone_init(tdb);
+
+ /* This makes sure we have the current map_size and mmap. */
+ tdb->methods->oob(tdb, tdb->map_size + 1, true);
+
+ /* Now we can pick a random free zone to start from. */
+ if (tdb_zone_init(tdb) == -1)
+ goto fail;
tdb->next = tdbs;
tdbs = tdb;
return NULL;
}
-static tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
+tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
{
return tdb->header.v.hash_off
+ ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
uint64_t keylen;
const unsigned char *rkey;
+ list &= ((1ULL << tdb->header.v.hash_bits) - 1);
+
off = tdb_read_off(tdb, tdb->header.v.hash_off
+ list * sizeof(tdb_off_t));
if (off == 0 || off == TDB_OFF_ERR)
tdb_off_t i;
for (i = list; i < list + num; i++) {
- if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT) != 0) {
+ if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT)
+ == TDB_OFF_ERR) {
unlock_lists(tdb, list, i - list, ltype);
return -1;
}
static tdb_len_t relock_hash_to_zero(struct tdb_context *tdb,
tdb_off_t start, int ltype)
{
- tdb_len_t num, len, pre_locks;
+ tdb_len_t num, len;
again:
num = 1ULL << tdb->header.v.hash_bits;
if (unlikely(len == num - start)) {
/* We hit the end of the hash range. Drop lock: we have
to lock start of hash first. */
+ tdb_len_t pre_locks;
+
tdb_unlock_list(tdb, start, ltype);
+
/* Grab something, so header is stable. */
if (tdb_lock_list(tdb, 0, ltype, TDB_LOCK_WAIT))
return TDB_OFF_ERR;
- len = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
- if (lock_lists(tdb, 1, len, ltype) == -1) {
+ pre_locks = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
+ /* We want to lock the zero entry as well. */
+ pre_locks++;
+ if (lock_lists(tdb, 1, pre_locks - 1, ltype) == -1) {
tdb_unlock_list(tdb, 0, ltype);
return TDB_OFF_ERR;
}
- pre_locks = len;
- len = num - start;
- } else {
- /* We already have lock on start. */
- start++;
- pre_locks = 0;
- }
- if (unlikely(lock_lists(tdb, start, len, ltype) == -1)) {
- if (pre_locks)
+
+ /* Now lock later ones. */
+ if (unlikely(lock_lists(tdb, start, len, ltype) == -1)) {
unlock_lists(tdb, 0, pre_locks, ltype);
- else
+ return TDB_OFF_ERR;
+ }
+ len += pre_locks;
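+ /* The zero entry is now at start + len - 1 (hash_off wraps). */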
+ } else {
+ /* We want to lock the zero entry as well. */
+ len++;
+ /* But we already have lock on start. */
+ if (unlikely(lock_lists(tdb, start+1, len-1, ltype) == -1)) {
tdb_unlock_list(tdb, start, ltype);
- return TDB_OFF_ERR;
+ return TDB_OFF_ERR;
+ }
}
/* Now, did we lose the race, and it's not zero any more? */
- if (unlikely(tdb_read_off(tdb, hash_off(tdb, pre_locks + len)) != 0)) {
- unlock_lists(tdb, 0, pre_locks, ltype);
+ if (unlikely(tdb_read_off(tdb, hash_off(tdb, start + len - 1)) != 0)) {
/* Leave the start locked, as expected. */
unlock_lists(tdb, start + 1, len - 1, ltype);
goto again;
}
- return pre_locks + len;
+ return len;
}
/* FIXME: modify, don't rewrite! */
{
uint64_t dataroom = rec_data_length(rec) + rec_extra_padding(rec);
- if (set_header(tdb, rec, keylen, datalen, keylen + dataroom, h))
+ if (set_header(tdb, rec, keylen, datalen, keylen + dataroom, h,
+ rec_zone_bits(rec)))
return -1;
return tdb_write_convert(tdb, off, rec, sizeof(*rec));
}
+static int hash_add(struct tdb_context *tdb,
+ uint64_t hash, tdb_off_t off)
+{
+ tdb_off_t i, hoff, len, num;
+
+ /* Look for next space. */
+ i = (hash & ((1ULL << tdb->header.v.hash_bits) - 1));
+ len = (1ULL << tdb->header.v.hash_bits) - i;
+ num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);
+
+ if (unlikely(num == len)) {
+ /* We wrapped. Look through start of hash table. */
+ i = 0;
+ hoff = hash_off(tdb, 0);
+ len = (1ULL << tdb->header.v.hash_bits);
+ num = tdb_find_zero_off(tdb, hoff, len);
+ if (num == len) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+ "hash_add: full hash table!\n");
+ return -1;
+ }
+ }
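+ /* Sanity check: the bucket we're about to use must still be empty. */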
+ if (tdb_read_off(tdb, hash_off(tdb, i + num)) != 0) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+ "hash_add: overwriting hash table?\n");
+ return -1;
+ }
+
+ /* FIXME: Encode extra hash bits! */
+ return tdb_write_off(tdb, hash_off(tdb, i + num), off);
+}
+
/* If we fail, others will try after us. */
static void enlarge_hash(struct tdb_context *tdb)
{
tdb_off_t newoff, oldoff, i;
tdb_len_t hlen;
- uint64_t h, num = 1ULL << tdb->header.v.hash_bits;
+ uint64_t num = 1ULL << tdb->header.v.hash_bits;
struct tdb_used_record pad, *r;
+ unsigned int records = 0;
/* FIXME: We should do this without holding locks throughout. */
if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
newoff = alloc(tdb, 0, hlen, 0, false);
if (unlikely(newoff == TDB_OFF_ERR))
goto unlock;
- if (unlikely(newoff == 0)) {
- if (tdb_expand(tdb, 0, hlen, false) == -1)
- goto unlock;
- newoff = alloc(tdb, 0, hlen, 0, false);
- if (newoff == TDB_OFF_ERR || newoff == 0)
- goto unlock;
- }
/* Step over record header! */
newoff += sizeof(struct tdb_used_record);
if (zero_out(tdb, newoff, hlen) == -1)
goto unlock;
+ /* Update header now so we can use normal routines. */
+ oldoff = tdb->header.v.hash_off;
+
+ tdb->header.v.hash_bits++;
+ tdb->header.v.hash_off = newoff;
+
/* FIXME: If the space before is empty, we know this is in its ideal
* location. Or steal a bit from the pointer to avoid rehash. */
- for (i = tdb_find_nonzero_off(tdb, hash_off(tdb, 0), num);
- i < num;
- i += tdb_find_nonzero_off(tdb, hash_off(tdb, i), num - i)) {
+ for (i = 0; i < num; i++) {
tdb_off_t off;
- off = tdb_read_off(tdb, hash_off(tdb, i));
+ off = tdb_read_off(tdb, oldoff + i * sizeof(tdb_off_t));
if (unlikely(off == TDB_OFF_ERR))
- goto unlock;
- if (unlikely(!off)) {
- tdb->ecode = TDB_ERR_CORRUPT;
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "find_bucket_and_lock: zero hash bucket!\n");
- goto unlock;
- }
- h = hash_record(tdb, off);
- /* FIXME: Encode extra hash bits! */
- if (tdb_write_off(tdb, newoff
- + (h & ((num * 2) - 1)) * sizeof(uint64_t),
- off) == -1)
- goto unlock;
+ goto oldheader;
+ if (off && hash_add(tdb, hash_record(tdb, off), off) == -1)
+ goto oldheader;
+ if (off)
+ records++;
}
+ tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
+ "enlarge_hash: moved %u records from %llu buckets.\n",
+ records, (long long)num);
+
/* Free up old hash. */
- oldoff = tdb->header.v.hash_off - sizeof(*r);
- r = tdb_get(tdb, oldoff, &pad, sizeof(*r));
+ r = tdb_get(tdb, oldoff - sizeof(*r), &pad, sizeof(*r));
if (!r)
- goto unlock;
- add_free_record(tdb, oldoff,
+ goto oldheader;
+ add_free_record(tdb, rec_zone_bits(r), oldoff - sizeof(*r),
sizeof(*r)+rec_data_length(r)+rec_extra_padding(r));
/* Now we write the modified header. */
- tdb->header.v.hash_bits++;
- tdb->header.v.hash_off = newoff;
write_header(tdb);
unlock:
tdb_allrecord_unlock(tdb, F_WRLCK);
+ return;
+
+oldheader:
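+ /* Put the header fields back so they match what's still on disk. */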
+ tdb->header.v.hash_bits--;
+ tdb->header.v.hash_off = oldoff;
+ goto unlock;
+}
+
+
+/* This is the slow version of the routine which searches the
+ * hashtable for an entry.
+ * We lock every hash bucket up to and including the next zero one.
+ */
+static tdb_off_t find_and_lock_slow(struct tdb_context *tdb,
+ struct tdb_data key,
+ uint64_t h,
+ int ltype,
+ tdb_off_t *start_lock,
+ tdb_len_t *num_locks,
+ tdb_off_t *bucket,
+ struct tdb_used_record *rec)
+{
+ /* Warning: this may drop the lock on *start_lock! */
+ *num_locks = relock_hash_to_zero(tdb, *start_lock, ltype);
+ if (*num_locks == TDB_OFF_ERR)
+ return TDB_OFF_ERR;
+
+ for (*bucket = *start_lock;
+ *bucket < *start_lock + *num_locks;
+ (*bucket)++) {
+ tdb_off_t off = entry_matches(tdb, *bucket, h, &key, rec);
+ /* Empty entry or we found it? */
+ if (off == 0 || off != TDB_OFF_ERR)
+ return off;
+ }
+
+ /* We didn't find a zero entry? Something went badly wrong... */
+ unlock_lists(tdb, *start_lock, *num_locks, ltype);
+ tdb->ecode = TDB_ERR_CORRUPT;
+ tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+ "find_and_lock: expected to find an empty hash bucket!\n");
+ return TDB_OFF_ERR;
+}
+
+/* This is the core routine which searches the hashtable for an entry.
+ * On error, no locks are held and TDB_OFF_ERR is returned.
+ * Otherwise, *num_locks locks of type ltype from *start_lock are held.
+ * The bucket where the entry is (or would be) is in *bucket.
+ * If not found, the return value is 0.
+ * If found, the return value is the offset, and *rec is the record. */
+static tdb_off_t find_and_lock(struct tdb_context *tdb,
+ struct tdb_data key,
+ uint64_t h,
+ int ltype,
+ tdb_off_t *start_lock,
+ tdb_len_t *num_locks,
+ tdb_off_t *bucket,
+ struct tdb_used_record *rec)
+{
+ tdb_off_t off;
+
+ /* FIXME: can we avoid locks for some fast paths? */
+ *start_lock = tdb_lock_list(tdb, h, ltype, TDB_LOCK_WAIT);
+ if (*start_lock == TDB_OFF_ERR)
+ return TDB_OFF_ERR;
+
+ /* Fast path. */
+ off = entry_matches(tdb, *start_lock, h, &key, rec);
+ if (likely(off != TDB_OFF_ERR)) {
+ *bucket = *start_lock;
+ *num_locks = 1;
+ return off;
+ }
+
+ /* Slow path, need to grab more locks and search. */
+ return find_and_lock_slow(tdb, key, h, ltype, start_lock, num_locks,
+ bucket, rec);
+}
+
+/* Returns -1 on error, 0 on OK. */
+static int replace_data(struct tdb_context *tdb,
+ uint64_t h, struct tdb_data key, struct tdb_data dbuf,
+ tdb_off_t bucket,
+ tdb_off_t old_off, tdb_len_t old_room,
+ unsigned old_zone,
+ bool growing)
+{
+ tdb_off_t new_off;
+
+ /* Allocate a new record. */
+ new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
+ if (unlikely(new_off == TDB_OFF_ERR))
+ return -1;
+
+ /* We didn't like the existing one: remove it. */
+ if (old_off)
+ add_free_record(tdb, old_zone, old_off,
+ sizeof(struct tdb_used_record)
+ + key.dsize + old_room);
+
+ /* FIXME: Encode extra hash bits! */
+ if (tdb_write_off(tdb, hash_off(tdb, bucket), new_off) == -1)
+ return -1;
+
+ new_off += sizeof(struct tdb_used_record);
+ if (tdb->methods->write(tdb, new_off, key.dptr, key.dsize) == -1)
+ return -1;
+
+ new_off += key.dsize;
+ if (tdb->methods->write(tdb, new_off, dbuf.dptr, dbuf.dsize) == -1)
+ return -1;
+
+ /* FIXME: tdb_increment_seqnum(tdb); */
+ return 0;
}
int tdb_store(struct tdb_context *tdb,
struct tdb_data key, struct tdb_data dbuf, int flag)
{
- tdb_off_t new_off, off, old_bucket, start, num_locks = 1;
+ tdb_off_t off, bucket, start, num;
+ tdb_len_t old_room = 0;
struct tdb_used_record rec;
uint64_t h;
- bool growing = false;
+ int ret;
h = tdb_hash(tdb, key.dptr, key.dsize);
-
- /* FIXME: can we avoid locks for some fast paths? */
- start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
- if (start == TDB_OFF_ERR)
+ off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
+ if (unlikely(off == TDB_OFF_ERR))
return -1;
- /* Fast path. */
- old_bucket = start;
- off = entry_matches(tdb, start, h, &key, &rec);
- if (unlikely(off == TDB_OFF_ERR)) {
- /* Slow path, need to grab more locks and search. */
- tdb_off_t i;
-
- /* Warning: this may drop the lock! Does that on error. */
- num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
- if (num_locks == TDB_OFF_ERR)
- return -1;
-
- for (i = start; i < start + num_locks; i++) {
- off = entry_matches(tdb, i, h, &key, &rec);
- /* Empty entry or we found it? */
- if (off == 0 || off != TDB_OFF_ERR) {
- old_bucket = i;
- break;
- }
- }
- if (i == start + num_locks)
- off = 0;
- }
-
/* Now we have lock on this hash bucket. */
if (flag == TDB_INSERT) {
if (off) {
}
} else {
if (off) {
- if (rec_data_length(&rec) + rec_extra_padding(&rec)
- >= dbuf.dsize) {
- new_off = off;
+ old_room = rec_data_length(&rec)
+ + rec_extra_padding(&rec);
+ if (old_room >= dbuf.dsize) {
+ /* Can modify in-place. Easy! */
if (update_rec_hdr(tdb, off,
key.dsize, dbuf.dsize,
&rec, h))
goto fail;
- goto write;
+ if (tdb->methods->write(tdb, off + sizeof(rec)
+ + key.dsize,
+ dbuf.dptr, dbuf.dsize))
+ goto fail;
+ unlock_lists(tdb, start, num, F_WRLCK);
+ return 0;
}
/* FIXME: See if right record is free? */
- /* Hint to allocator that we've realloced. */
- growing = true;
} else {
if (flag == TDB_MODIFY) {
/* if the record doesn't exist and we
}
}
- /* Allocate a new record. */
- new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
- if (new_off == 0) {
- unlock_lists(tdb, start, num_locks, F_WRLCK);
- /* Expand, then try again... */
- if (tdb_expand(tdb, key.dsize, dbuf.dsize, growing) == -1)
- return -1;
- return tdb_store(tdb, key, dbuf, flag);
- }
+ /* If we didn't use the old record, this implies we're growing. */
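+ /* (rec's zone bits are only used when off is non-zero.) */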
+ ret = replace_data(tdb, h, key, dbuf, bucket, off, old_room,
+ rec_zone_bits(&rec), off != 0);
+ unlock_lists(tdb, start, num, F_WRLCK);
+
+ /* FIXME: by simple simulation, this approximated 60% full.
+ * Check in real case! */
+ if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
+ enlarge_hash(tdb);
+
+ return ret;
+
+fail:
+ unlock_lists(tdb, start, num, F_WRLCK);
+ return -1;
+}
+
+int tdb_append(struct tdb_context *tdb,
+ struct tdb_data key, struct tdb_data dbuf)
+{
+ tdb_off_t off, bucket, start, num;
+ struct tdb_used_record rec;
+ tdb_len_t old_room = 0, old_dlen;
+ uint64_t h;
+ unsigned char *newdata;
+ struct tdb_data new_dbuf;
+ int ret;
+
+ h = tdb_hash(tdb, key.dptr, key.dsize);
+ off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
+ if (unlikely(off == TDB_OFF_ERR))
+ return -1;
- /* We didn't like the existing one: remove it. */
if (off) {
- add_free_record(tdb, off, sizeof(struct tdb_used_record)
- + rec_key_length(&rec)
- + rec_data_length(&rec)
- + rec_extra_padding(&rec));
- }
+ old_dlen = rec_data_length(&rec);
+ old_room = old_dlen + rec_extra_padding(&rec);
- /* FIXME: Encode extra hash bits! */
- if (tdb_write_off(tdb, hash_off(tdb, old_bucket), new_off) == -1)
- goto fail;
+ /* Fast path: can append in place. */
+ if (rec_extra_padding(&rec) >= dbuf.dsize) {
+ if (update_rec_hdr(tdb, off, key.dsize,
+ old_dlen + dbuf.dsize, &rec, h))
+ goto fail;
-write:
- off = new_off + sizeof(struct tdb_used_record);
- if (tdb->methods->write(tdb, off, key.dptr, key.dsize) == -1)
- goto fail;
- off += key.dsize;
- if (tdb->methods->write(tdb, off, dbuf.dptr, dbuf.dsize) == -1)
- goto fail;
+ off += sizeof(rec) + key.dsize + old_dlen;
+ if (tdb->methods->write(tdb, off, dbuf.dptr,
+ dbuf.dsize) == -1)
+ goto fail;
- /* FIXME: tdb_increment_seqnum(tdb); */
- unlock_lists(tdb, start, num_locks, F_WRLCK);
+ /* FIXME: tdb_increment_seqnum(tdb); */
+ unlock_lists(tdb, start, num, F_WRLCK);
+ return 0;
+ }
+ /* FIXME: Check right record free? */
+
+ /* Slow path. */
+ newdata = malloc(key.dsize + old_dlen + dbuf.dsize);
+ if (!newdata) {
+ tdb->ecode = TDB_ERR_OOM;
+ tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+ "tdb_append: cannot allocate %llu bytes!\n",
+ (long long)key.dsize + old_dlen + dbuf.dsize);
+ goto fail;
+ }
+ if (tdb->methods->read(tdb, off + sizeof(rec) + key.dsize,
+ newdata, old_dlen) != 0) {
+ free(newdata);
+ goto fail;
+ }
+ memcpy(newdata + old_dlen, dbuf.dptr, dbuf.dsize);
+ new_dbuf.dptr = newdata;
+ new_dbuf.dsize = old_dlen + dbuf.dsize;
+ } else {
+ newdata = NULL;
+ new_dbuf = dbuf;
+ }
+
+ /* If they're using tdb_append(), it implies they're growing the
+ * record. */
+ ret = replace_data(tdb, h, key, new_dbuf, bucket, off, old_room,
+ rec_zone_bits(&rec), true);
+ unlock_lists(tdb, start, num, F_WRLCK);
+ free(newdata);
/* FIXME: by simple simulation, this approximated 60% full.
* Check in real case! */
- if (unlikely(num_locks > 4 * tdb->header.v.hash_bits - 31))
+ if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
enlarge_hash(tdb);
- return 0;
+ return ret;
fail:
- unlock_lists(tdb, start, num_locks, F_WRLCK);
+ unlock_lists(tdb, start, num, F_WRLCK);
return -1;
}
struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
{
- tdb_off_t off, start, num_locks = 1;
+ tdb_off_t off, start, num, bucket;
struct tdb_used_record rec;
uint64_t h;
struct tdb_data ret;
h = tdb_hash(tdb, key.dptr, key.dsize);
-
- /* FIXME: can we avoid locks for some fast paths? */
- start = tdb_lock_list(tdb, h, F_RDLCK, TDB_LOCK_WAIT);
- if (start == TDB_OFF_ERR)
+ off = find_and_lock(tdb, key, h, F_RDLCK, &start, &num, &bucket, &rec);
+ if (unlikely(off == TDB_OFF_ERR))
return tdb_null;
- /* Fast path. */
- off = entry_matches(tdb, start, h, &key, &rec);
- if (unlikely(off == TDB_OFF_ERR)) {
- /* Slow path, need to grab more locks and search. */
- tdb_off_t i;
-
- /* Warning: this may drop the lock! Does that on error. */
- num_locks = relock_hash_to_zero(tdb, start, F_RDLCK);
- if (num_locks == TDB_OFF_ERR)
- return tdb_null;
-
- for (i = start; i < start + num_locks; i++) {
- off = entry_matches(tdb, i, h, &key, &rec);
- /* Empty entry or we found it? */
- if (off == 0 || off != TDB_OFF_ERR)
- break;
- }
- if (i == start + num_locks)
- off = 0;
- }
-
if (!off) {
- unlock_lists(tdb, start, num_locks, F_RDLCK);
tdb->ecode = TDB_ERR_NOEXIST;
- return tdb_null;
+ ret = tdb_null;
+ } else {
+ ret.dsize = rec_data_length(&rec);
+ ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
+ ret.dsize);
}
- ret.dsize = rec_data_length(&rec);
- ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
- ret.dsize);
- unlock_lists(tdb, start, num_locks, F_RDLCK);
+ unlock_lists(tdb, start, num, F_RDLCK);
return ret;
}
-static int hash_add(struct tdb_context *tdb, uint64_t h, tdb_off_t off)
-{
- tdb_off_t i, hoff, len, num;
-
- /* Look for next space. */
- i = (h & ((1ULL << tdb->header.v.hash_bits) - 1));
- len = (1ULL << tdb->header.v.hash_bits) - i;
- num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);
-
- if (unlikely(num == len)) {
- /* We wrapped. Look through start of hash table. */
- hoff = hash_off(tdb, 0);
- len = (1ULL << tdb->header.v.hash_bits);
- num = tdb_find_zero_off(tdb, hoff, len);
- if (i == len) {
- tdb->ecode = TDB_ERR_CORRUPT;
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "hash_add: full hash table!\n");
- return -1;
- }
- }
- /* FIXME: Encode extra hash bits! */
- return tdb_write_off(tdb, hash_off(tdb, i + num), off);
-}
-
int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
{
- tdb_off_t i, old_bucket, off, start, num_locks = 1;
+ tdb_off_t i, bucket, off, start, num;
struct tdb_used_record rec;
uint64_t h;
h = tdb_hash(tdb, key.dptr, key.dsize);
-
- /* FIXME: can we avoid locks for some fast paths? */
start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
- if (start == TDB_OFF_ERR)
+ if (unlikely(start == TDB_OFF_ERR))
return -1;
- /* Fast path. */
- old_bucket = start;
- off = entry_matches(tdb, start, h, &key, &rec);
- if (off && off != TDB_OFF_ERR) {
- /* We can only really fastpath delete if next bucket
- * is 0. Note that we haven't locked it, but our lock
- * on this bucket stops anyone overflowing into it
- * while we look. */
- if (tdb_read_off(tdb, hash_off(tdb, h+1)) == 0)
- goto delete;
- /* Slow path. */
- off = TDB_OFF_ERR;
- }
-
- if (unlikely(off == TDB_OFF_ERR)) {
- /* Slow path, need to grab more locks and search. */
- tdb_off_t i;
-
- /* Warning: this may drop the lock! Does that on error. */
- num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
- if (num_locks == TDB_OFF_ERR)
- return -1;
-
- for (i = start; i < start + num_locks; i++) {
- off = entry_matches(tdb, i, h, &key, &rec);
- /* Empty entry or we found it? */
- if (off == 0 || off != TDB_OFF_ERR) {
- old_bucket = i;
- break;
- }
- }
- if (i == start + num_locks)
- off = 0;
- }
+ /* FIXME: Fastpath: if the next bucket is zero, we could delete
+ * without the extra locks, since our single lock protects us. */
+ off = find_and_lock_slow(tdb, key, h, F_WRLCK,
+ &start, &num, &bucket, &rec);
+ if (unlikely(off == TDB_OFF_ERR))
+ return -1;
if (!off) {
- unlock_lists(tdb, start, num_locks, F_WRLCK);
+ /* FIXME: We could optimize the not-found case if it mattered,
+ * by reading the offset after the first lock: if it's zero,
+ * jump straight here. */
+ unlock_lists(tdb, start, num, F_WRLCK);
tdb->ecode = TDB_ERR_NOEXIST;
return -1;
}
+ /* Since we found the entry, we must have locked it and a zero. */
+ assert(num >= 2);
-delete:
/* This actually unlinks it. */
- if (tdb_write_off(tdb, hash_off(tdb, old_bucket), 0) == -1)
+ if (tdb_write_off(tdb, hash_off(tdb, bucket), 0) == -1)
goto unlock_err;
/* Rehash anything following. */
- for (i = hash_off(tdb, old_bucket+1);
- i != hash_off(tdb, h + num_locks);
- i += sizeof(tdb_off_t)) {
- tdb_off_t off2;
+ for (i = bucket+1; i != bucket + num - 1; i++) {
+ tdb_off_t hoff, off2;
uint64_t h2;
- off2 = tdb_read_off(tdb, i);
+ hoff = hash_off(tdb, i);
+ off2 = tdb_read_off(tdb, hoff);
if (unlikely(off2 == TDB_OFF_ERR))
goto unlock_err;
+ /* This can happen if we raced. */
+ if (unlikely(off2 == 0))
+ break;
+
/* Maybe use a bit to indicate it is in ideal place? */
h2 = hash_record(tdb, off2);
/* Is it happy where it is? */
- if (hash_off(tdb, h2) == i)
+ if (hash_off(tdb, h2) == hoff)
continue;
/* Remove it. */
- if (tdb_write_off(tdb, i, 0) == -1)
+ if (tdb_write_off(tdb, hoff, 0) == -1)
goto unlock_err;
/* Rehash it. */
}
/* Free the deleted entry. */
- if (add_free_record(tdb, off,
+ if (add_free_record(tdb, rec_zone_bits(&rec), off,
sizeof(struct tdb_used_record)
+ rec_key_length(&rec)
+ rec_data_length(&rec)
+ rec_extra_padding(&rec)) != 0)
goto unlock_err;
- unlock_lists(tdb, start, num_locks, F_WRLCK);
+ unlock_lists(tdb, start, num, F_WRLCK);
return 0;
unlock_err:
- unlock_lists(tdb, start, num_locks, F_WRLCK);
+ unlock_lists(tdb, start, num, F_WRLCK);
return -1;
}