sizeof(newdb.hdr.hash_test),
newdb.hdr.hash_seed,
tdb->hash_priv);
-
+ memset(newdb.hdr.reserved, 0, sizeof(newdb.hdr.reserved));
newdb.hdr.v.generation = 0;
/* The initial zone must cover the initial database size! */
return NULL;
}
-static tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
+tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
{
return tdb->header.v.hash_off
+ ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
if (unlikely(num == len)) {
/* We wrapped. Look through start of hash table. */
+ i = 0;
hoff = hash_off(tdb, 0);
len = (1ULL << tdb->header.v.hash_bits);
num = tdb_find_zero_off(tdb, hoff, len);
- if (i == len) {
+ if (num == len) {
tdb->ecode = TDB_ERR_CORRUPT;
tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
"hash_add: full hash table!\n");
return -1;
}
}
+ if (tdb_read_off(tdb, hash_off(tdb, i + num)) != 0) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+ "hash_add: overwriting hash table?\n");
+ return -1;
+ }
+
/* FIXME: Encode extra hash bits! */
return tdb_write_off(tdb, hash_off(tdb, i + num), off);
}
tdb_len_t hlen;
uint64_t num = 1ULL << tdb->header.v.hash_bits;
struct tdb_used_record pad, *r;
+ unsigned int records = 0;
/* FIXME: We should do this without holding locks throughout. */
if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
goto oldheader;
if (off && hash_add(tdb, hash_record(tdb, off), off) == -1)
goto oldheader;
+ if (off)
+ records++;
}
+ tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
+ "enlarge_hash: moved %u records from %llu buckets.\n",
+ records, (long long)num);
+
/* Free up old hash. */
r = tdb_get(tdb, oldoff - sizeof(*r), &pad, sizeof(*r));
if (!r)
goto unlock;
}
+
+/* This is the slow version of the routine which searches the
+ * hashtable for an entry.
+ * We lock every hash bucket up to and including the next zero one.
+ *
+ * Returns the offset of the matching record, 0 if no entry with this
+ * key was found, or TDB_OFF_ERR on error.  On 0 or a record offset,
+ * *num_locks buckets starting at *start_lock remain locked with ltype
+ * and *bucket is the last bucket examined; *rec holds the record
+ * header when a match was found.  On the corruption path below, the
+ * locks taken here are dropped before returning TDB_OFF_ERR.
+ */
+static tdb_off_t find_and_lock_slow(struct tdb_context *tdb,
+ struct tdb_data key,
+ uint64_t h,
+ int ltype,
+ tdb_off_t *start_lock,
+ tdb_len_t *num_locks,
+ tdb_off_t *bucket,
+ struct tdb_used_record *rec)
+{
+ /* Warning: this may drop the lock on *bucket! */
+ *num_locks = relock_hash_to_zero(tdb, *start_lock, ltype);
+ if (*num_locks == TDB_OFF_ERR)
+ return TDB_OFF_ERR;
+
+ for (*bucket = *start_lock;
+ *bucket < *start_lock + *num_locks;
+ (*bucket)++) {
+ tdb_off_t off = entry_matches(tdb, *bucket, h, &key, rec);
+ /* Empty entry or we found it? */
+ if (off == 0 || off != TDB_OFF_ERR)
+ return off;
+ }
+
+ /* We didn't find a zero entry? Something went badly wrong... */
+ unlock_lists(tdb, *start_lock, *start_lock + *num_locks, ltype);
+ tdb->ecode = TDB_ERR_CORRUPT;
+ tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+ "find_and_lock: expected to find an empty hash bucket!\n");
+ return TDB_OFF_ERR;
+}
+
/* This is the core routine which searches the hashtable for an entry.
* On error, no locks are held and TDB_OFF_ERR is returned.
* Otherwise, *num_locks locks of type ltype from *start_lock are held.
}
/* Slow path, need to grab more locks and search. */
+ return find_and_lock_slow(tdb, key, h, ltype, start_lock, num_locks,
+ bucket, rec);
+}
- /* Warning: this may drop the lock on *bucket! */
- *num_locks = relock_hash_to_zero(tdb, *start_lock, ltype);
- if (*num_locks == TDB_OFF_ERR)
- return TDB_OFF_ERR;
+/* Common tail of tdb_store/tdb_append: allocate a fresh record for
+ * key+dbuf, point hash bucket "bucket" at it, and (when old_off is
+ * non-zero) put the superseded record — whose data+padding capacity
+ * was old_room — back on the free list.  "growing" is passed through
+ * to the allocator as a hint that the caller expects this record to
+ * keep growing.
+ *
+ * Returns -1 on error, 0 on OK, 1 on "expand and retry." */
+static int replace_data(struct tdb_context *tdb,
+ uint64_t h, struct tdb_data key, struct tdb_data dbuf,
+ tdb_off_t bucket,
+ tdb_off_t old_off, tdb_len_t old_room,
+ bool growing)
+{
+ tdb_off_t new_off;
- for (*bucket = *start_lock;
- *bucket < *start_lock + *num_locks;
- (*bucket)++) {
- off = entry_matches(tdb, *bucket, h, &key, rec);
- /* Empty entry or we found it? */
- if (off == 0 || off != TDB_OFF_ERR)
- return off;
- }
+ /* Allocate a new record. */
+ new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
+ if (unlikely(new_off == TDB_OFF_ERR))
+ return -1;
- /* We didn't find a zero entry? Something went badly wrong... */
- unlock_lists(tdb, *start_lock, *start_lock + *num_locks, ltype);
- tdb->ecode = TDB_ERR_CORRUPT;
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "find_and_lock: expected to find an empty hash bucket!\n");
- return TDB_OFF_ERR;
+ /* alloc returning 0 means no space: tell caller to expand+retry. */
+ if (unlikely(new_off == 0))
+ return 1;
+
+ /* We didn't like the existing one: remove it. */
+ if (old_off)
+ add_free_record(tdb, old_off,
+ sizeof(struct tdb_used_record)
+ + key.dsize + old_room);
+
+ /* FIXME: Encode extra hash bits! */
+ if (tdb_write_off(tdb, hash_off(tdb, bucket), new_off) == -1)
+ return -1;
+
+ /* Record layout is header, then key bytes, then data bytes. */
+ new_off += sizeof(struct tdb_used_record);
+ if (tdb->methods->write(tdb, new_off, key.dptr, key.dsize) == -1)
+ return -1;
+
+ new_off += key.dsize;
+ if (tdb->methods->write(tdb, new_off, dbuf.dptr, dbuf.dsize) == -1)
+ return -1;
+
+ /* FIXME: tdb_increment_seqnum(tdb); */
+ return 0;
}
int tdb_store(struct tdb_context *tdb,
struct tdb_data key, struct tdb_data dbuf, int flag)
{
- tdb_off_t new_off, off, bucket, start, num;
+ tdb_off_t off, bucket, start, num;
+ tdb_len_t old_room = 0;
struct tdb_used_record rec;
uint64_t h;
- bool growing = false;
+ int ret;
h = tdb_hash(tdb, key.dptr, key.dsize);
off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
}
} else {
if (off) {
- if (rec_data_length(&rec) + rec_extra_padding(&rec)
- >= dbuf.dsize) {
- new_off = off;
+ old_room = rec_data_length(&rec)
+ + rec_extra_padding(&rec);
+ if (old_room >= dbuf.dsize) {
+ /* Can modify in-place. Easy! */
if (update_rec_hdr(tdb, off,
key.dsize, dbuf.dsize,
&rec, h))
goto fail;
- goto write;
+ if (tdb->methods->write(tdb, off + sizeof(rec)
+ + key.dsize,
+ dbuf.dptr, dbuf.dsize))
+ goto fail;
+ unlock_lists(tdb, start, num, F_WRLCK);
+ return 0;
}
/* FIXME: See if right record is free? */
- /* Hint to allocator that we've realloced. */
- growing = true;
} else {
if (flag == TDB_MODIFY) {
/* if the record doesn't exist and we
}
}
- /* Allocate a new record. */
- new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
- if (new_off == 0) {
- unlock_lists(tdb, start, num, F_WRLCK);
+ /* If we didn't use the old record, this implies we're growing. */
+ ret = replace_data(tdb, h, key, dbuf, bucket, off, old_room, off != 0);
+ unlock_lists(tdb, start, num, F_WRLCK);
+
+ if (unlikely(ret == 1)) {
/* Expand, then try again... */
- if (tdb_expand(tdb, key.dsize, dbuf.dsize, growing) == -1)
+ if (tdb_expand(tdb, key.dsize, dbuf.dsize, off != 0) == -1)
return -1;
return tdb_store(tdb, key, dbuf, flag);
}
- /* We didn't like the existing one: remove it. */
+ /* FIXME: by simple simulation, this approximated 60% full.
+ * Check in real case! */
+ if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
+ enlarge_hash(tdb);
+
+ return ret;
+
+fail:
+ unlock_lists(tdb, start, num, F_WRLCK);
+ return -1;
+}
+
+int tdb_append(struct tdb_context *tdb,
+ struct tdb_data key, struct tdb_data dbuf)
+{
+ tdb_off_t off, bucket, start, num;
+ struct tdb_used_record rec;
+ tdb_len_t old_room = 0, old_dlen;
+ uint64_t h;
+ unsigned char *newdata;
+ struct tdb_data new_dbuf;
+ int ret;
+
+ h = tdb_hash(tdb, key.dptr, key.dsize);
+ off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
+ if (unlikely(off == TDB_OFF_ERR))
+ return -1;
+
if (off) {
- add_free_record(tdb, off, sizeof(struct tdb_used_record)
- + rec_key_length(&rec)
- + rec_data_length(&rec)
- + rec_extra_padding(&rec));
- }
+ old_dlen = rec_data_length(&rec);
+ old_room = old_dlen + rec_extra_padding(&rec);
- /* FIXME: Encode extra hash bits! */
- if (tdb_write_off(tdb, hash_off(tdb, bucket), new_off) == -1)
- goto fail;
+ /* Fast path: can append in place. */
+ if (rec_extra_padding(&rec) >= dbuf.dsize) {
+ if (update_rec_hdr(tdb, off, key.dsize,
+ old_dlen + dbuf.dsize, &rec, h))
+ goto fail;
-write:
- off = new_off + sizeof(struct tdb_used_record);
- if (tdb->methods->write(tdb, off, key.dptr, key.dsize) == -1)
- goto fail;
- off += key.dsize;
- if (tdb->methods->write(tdb, off, dbuf.dptr, dbuf.dsize) == -1)
- goto fail;
+ off += sizeof(rec) + key.dsize + old_dlen;
+ if (tdb->methods->write(tdb, off, dbuf.dptr,
+ dbuf.dsize) == -1)
+ goto fail;
- /* FIXME: tdb_increment_seqnum(tdb); */
+ /* FIXME: tdb_increment_seqnum(tdb); */
+ unlock_lists(tdb, start, num, F_WRLCK);
+ return 0;
+ }
+ /* FIXME: Check right record free? */
+
+ /* Slow path. */
+ newdata = malloc(key.dsize + old_dlen + dbuf.dsize);
+ if (!newdata) {
+ tdb->ecode = TDB_ERR_OOM;
+ tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+ "tdb_append: cannot allocate %llu bytes!\n",
+ (long long)key.dsize + old_dlen + dbuf.dsize);
+ goto fail;
+ }
+ if (tdb->methods->read(tdb, off + sizeof(rec) + key.dsize,
+ newdata, old_dlen) != 0) {
+ free(newdata);
+ goto fail;
+ }
+ memcpy(newdata + old_dlen, dbuf.dptr, dbuf.dsize);
+ new_dbuf.dptr = newdata;
+ new_dbuf.dsize = old_dlen + dbuf.dsize;
+ } else {
+ newdata = NULL;
+ new_dbuf = dbuf;
+ }
+
+ /* If they're using tdb_append(), it implies they're growing record. */
+ ret = replace_data(tdb, h, key, new_dbuf, bucket, off, old_room, true);
unlock_lists(tdb, start, num, F_WRLCK);
+ free(newdata);
+
+ if (unlikely(ret == 1)) {
+ /* Expand, then try again. */
+ if (tdb_expand(tdb, key.dsize, dbuf.dsize, true) == -1)
+ return -1;
+ return tdb_append(tdb, key, dbuf);
+ }
/* FIXME: by simple simulation, this approximated 60% full.
* Check in real case! */
if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
enlarge_hash(tdb);
- return 0;
+ return ret;
fail:
unlock_lists(tdb, start, num, F_WRLCK);
uint64_t h;
h = tdb_hash(tdb, key.dptr, key.dsize);
- off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
+ start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
+ if (unlikely(start == TDB_OFF_ERR))
+ return -1;
+
+ /* FIXME: Fastpath: if next is zero, we can delete without lock,
+ * since this lock protects us. */
+ off = find_and_lock_slow(tdb, key, h, F_WRLCK,
+ &start, &num, &bucket, &rec);
if (unlikely(off == TDB_OFF_ERR))
return -1;
if (!off) {
+ /* FIXME: We could optimize not found case if it mattered, by
+ * reading offset after first lock: if it's zero, goto here. */
unlock_lists(tdb, start, num, F_WRLCK);
tdb->ecode = TDB_ERR_NOEXIST;
return -1;
}
+ /* Since we found the entry, we must have locked it and a zero. */
+ assert(num >= 2);
/* This actually unlinks it. */
if (tdb_write_off(tdb, hash_off(tdb, bucket), 0) == -1)
goto unlock_err;
/* Rehash anything following. */
- for (i = hash_off(tdb, bucket+1);
- i != hash_off(tdb, h + num - 1);
- i += sizeof(tdb_off_t)) {
- tdb_off_t off2;
+ for (i = bucket+1; i != bucket + num - 1; i++) {
+ tdb_off_t hoff, off2;
uint64_t h2;
- off2 = tdb_read_off(tdb, i);
+ hoff = hash_off(tdb, i);
+ off2 = tdb_read_off(tdb, hoff);
if (unlikely(off2 == TDB_OFF_ERR))
goto unlock_err;
/* Maybe use a bit to indicate it is in ideal place? */
h2 = hash_record(tdb, off2);
/* Is it happy where it is? */
- if (hash_off(tdb, h2) == i)
+ if (hash_off(tdb, h2) == hoff)
continue;
/* Remove it. */
- if (tdb_write_off(tdb, i, 0) == -1)
+ if (tdb_write_off(tdb, hoff, 0) == -1)
goto unlock_err;
/* Rehash it. */