uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
{
- struct tdb_used_record pad, *r;
+ const struct tdb_used_record *r;
const void *key;
uint64_t klen, hash;
- r = tdb_get(tdb, off, &pad, sizeof(pad));
+ r = tdb_access_read(tdb, off, sizeof(*r), true);
if (!r)
/* FIXME */
return 0;
klen = rec_key_length(r);
- key = tdb_access_read(tdb, off + sizeof(pad), klen, false);
+ tdb_access_release(tdb, r);
+
+ key = tdb_access_read(tdb, off + sizeof(*r), klen, false);
if (!key)
return 0;
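
The rewritten hash_record() replaces the copy-into-pad tdb_get() with two
tdb_access_read() windows, releasing the header window before mapping the
key. A minimal sketch of that read/release discipline, using hypothetical
mock_* helpers in place of the real tdb2 calls:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static unsigned char file[64];	/* stand-in for the mapped tdb file */

	static const void *mock_access_read(uint64_t off, uint64_t len)
	{
		if (off + len > sizeof(file))
			return NULL;	/* out-of-range access fails */
		return file + off;	/* direct pointer, as in the mmap case */
	}

	static void mock_access_release(const void *p)
	{
		(void)p;	/* no-op here; the real call may free a copy */
	}

	int main(void)
	{
		const unsigned char *hdr;
		const char *key;
		uint64_t klen;

		memcpy(file, "\x05hello", 6);	/* 1-byte key length, then the key */

		hdr = mock_access_read(0, 1);	/* window 1: the "record header" */
		if (!hdr)
			return 1;
		klen = *hdr;
		mock_access_release(hdr);	/* release before opening window 2 */

		key = mock_access_read(1, klen);	/* window 2: the key itself */
		if (!key)
			return 1;
		printf("key: %.*s\n", (int)klen, key);
		mock_access_release(key);
		return 0;
	}
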
tdb_off_t val,
struct tdb_used_record *rec)
{
- bool ret;
+ bool ret = false;
const unsigned char *rkey;
tdb_off_t off;
- /* FIXME: Handle hash value truncated. */
- if (bits(val, TDB_OFF_HASH_TRUNCATED_BIT, 1))
- abort();
-
+ add_stat(tdb, compares, 1);
/* Desired bucket must match. */
- if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK))
- return false;
+ if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK)) {
+ add_stat(tdb, compare_wrong_bucket, 1);
+ return ret;
+ }
/* Top bits of offset == next bits of hash. */
if (bits(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
!= bits(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
- TDB_OFF_UPPER_STEAL_EXTRA))
- return false;
+ TDB_OFF_UPPER_STEAL_EXTRA)) {
+ add_stat(tdb, compare_wrong_offsetbits, 1);
+ return ret;
+ }
off = val & TDB_OFF_MASK;
if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
- return false;
+ return ret;
/* FIXME: check extra bits in header? */
- if (rec_key_length(rec) != key->dsize)
- return false;
+ if (rec_key_length(rec) != key->dsize) {
+ add_stat(tdb, compare_wrong_keylen, 1);
+ return ret;
+ }
rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
if (!rkey)
- return false;
- ret = (memcmp(rkey, key->dptr, key->dsize) == 0);
+ return ret;
+ if (memcmp(rkey, key->dptr, key->dsize) == 0)
+ ret = true;
+ else
+ add_stat(tdb, compare_wrong_keycmp, 1);
tdb_access_release(tdb, rkey);
return ret;
}
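
Each early exit from match() now bumps a dedicated counter, so the
statistics can show why a compare failed, not just how often one ran. A
minimal sketch of the counter-per-exit-path shape, with a hypothetical
stats struct covering a subset of the counters the patch adds:

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	struct stats {
		uint64_t compares;
		uint64_t wrong_bucket;
		uint64_t wrong_keylen;
		uint64_t wrong_keycmp;
	};

	static bool keys_match(struct stats *st,
			       const char *a, size_t alen, unsigned abucket,
			       const char *b, size_t blen, unsigned bbucket)
	{
		st->compares++;
		if (abucket != bbucket) {	/* cheapest check first */
			st->wrong_bucket++;
			return false;
		}
		if (alen != blen) {	/* then length, no data fetch */
			st->wrong_keylen++;
			return false;
		}
		if (memcmp(a, b, alen) != 0) {	/* full compare last */
			st->wrong_keycmp++;
			return false;
		}
		return true;
	}

	int main(void)
	{
		struct stats st = { 0 };
		(void)keys_match(&st, "foo", 3, 1, "foo", 3, 2);	/* wrong bucket */
		(void)keys_match(&st, "foo", 3, 1, "foo", 3, 1);	/* full match */
		return st.compares == 2 && st.wrong_bucket == 1 ? 0 : 1;
	}
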
+ (bucket % (1 << TDB_HASH_GROUP_BITS)) * sizeof(tdb_off_t);
}
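
For reference, the arithmetic in the added line wraps the bucket number
into its group (1 << TDB_HASH_GROUP_BITS buckets) and scales by the size
of one stored offset. A toy version, assuming a hypothetical 4-bit group
size:

	#include <assert.h>
	#include <stdint.h>

	#define GROUP_BITS 4	/* hypothetical: 1 << 4 = 16 buckets per group */

	static uint64_t bucket_off(uint64_t group_start, unsigned bucket)
	{
		/* wrap into the group; each slot holds one 8-byte offset */
		return group_start + (bucket % (1u << GROUP_BITS)) * sizeof(uint64_t);
	}

	int main(void)
	{
		assert(bucket_off(1024, 0) == 1024);
		assert(bucket_off(1024, 17) == 1024 + 1 * 8);	/* 17 wraps to 1 */
		return 0;
	}
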
-/* Truncated hashes can't be all 1: that's how we spot a sub-hash */
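+/* A single stolen bit marks a sub-hash pointer. */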
bool is_subhash(tdb_off_t val)
{
- return val >> (64-TDB_OFF_UPPER_STEAL) == (1<<TDB_OFF_UPPER_STEAL) - 1;
+ return (val >> TDB_OFF_UPPER_STEAL_SUBHASH_BIT) & 1;
}
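
The removed test reserved the all-ones pattern in the stolen upper bits to
mark a sub-hash; the new encoding spends one dedicated bit instead,
leaving the other stolen bits free to carry extra hash bits (compare the
TDB_OFF_UPPER_STEAL_EXTRA check in match() above). A self-contained
sketch of the tag-and-test pair, with an assumed bit position standing in
for TDB_OFF_UPPER_STEAL_SUBHASH_BIT:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define SUBHASH_BIT 56	/* hypothetical bit position */

	static uint64_t tag_subhash(uint64_t off)
	{
		return off | (1ULL << SUBHASH_BIT);	/* mark as subhash pointer */
	}

	static bool is_subhash(uint64_t val)
	{
		return (val >> SUBHASH_BIT) & 1;	/* test just the stolen bit */
	}

	int main(void)
	{
		uint64_t off = 0x1000;
		assert(!is_subhash(off));
		assert(is_subhash(tag_subhash(off)));
		return 0;
	}
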
/* FIXME: Guess the depth, don't over-lock! */
/* I wrote a simple test, expanding a hash to 2GB, for the following
* cases:
* 1) Expanding all the buckets at once,
- * 2) Expanding the most-populated bucket,
- * 3) Expanding the bucket we wanted to place the new entry ito.
+ * 2) Expanding the bucket we wanted to place the new entry into,
+ * 3) Expanding the most-populated bucket.
*
* I measured the worst/average/best density during this process.
* 1) 3%/16%/30%
if (hash_used + TDB_SUBLEVEL_HASH_BITS > 64)
abort();
- /* FIXME: Do truncated hash bits if we can! */
h.h = hash_record(tdb, off);
gnum = use_bits(&h, TDB_SUBLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS);
h.group_start = subhash + sizeof(struct tdb_used_record)
/* assert(num_vals); */
/* Overwrite expanded bucket with subhash pointer. */
- h->group[bucket] = subhash | ~((1ULL << (64 - TDB_OFF_UPPER_STEAL))-1);
+ h->group[bucket] = subhash | (1ULL << TDB_OFF_UPPER_STEAL_SUBHASH_BIT);
/* Put values back. */
for (i = 0; i < num_vals; i++) {
return -1;
}
if (rec_magic(&rec) != TDB_MAGIC) {
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "next_in_hash:"
- " corrupt record at %llu\n",
- (long long)off);
+ tdb_logerr(tdb, TDB_ERR_CORRUPT,
+ TDB_DEBUG_FATAL,
+ "next_in_hash:"
+ " corrupt record at %llu",
+ (unsigned long long)off);
return -1;
}
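
In the new call, tdb_logerr() records the error code (TDB_ERR_CORRUPT
here) and emits the message in one step, and the format string loses its
trailing \n, presumably supplied by the helper. A minimal, self-contained
sketch of that shape, with hypothetical logerr()/struct ctx names rather
than tdb2's real internals:

	#include <stdarg.h>
	#include <stdio.h>

	enum err { ERR_NONE, ERR_CORRUPT };

	struct ctx {
		enum err ecode;	/* last error, like tdb->ecode */
	};

	static void logerr(struct ctx *c, enum err e, const char *fmt, ...)
	{
		va_list ap;

		c->ecode = e;	/* record the error code... */
		va_start(ap, fmt);
		vfprintf(stderr, fmt, ap);	/* ...and log in the same call */
		va_end(ap);
		fputc('\n', stderr);	/* callers no longer append \n */
	}

	int main(void)
	{
		struct ctx c = { ERR_NONE };
		logerr(&c, ERR_CORRUPT, "next_in_hash: corrupt record at %llu",
		       (unsigned long long)4096);
		return c.ecode == ERR_CORRUPT ? 0 : 1;
	}
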