X-Git-Url: https://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Ftdb2%2Fhash.c;h=260c36f266f0dc8dccaa331993153fe69d6c9a64;hp=51874918c62ef4fb53e5581c252e8ace6301de9a;hb=1d4d21dfb5ac43274afc125f132d196ce07f3177;hpb=c5e3f07a30056cb7c3c380bf690f80815ca4b6b1 diff --git a/ccan/tdb2/hash.c b/ccan/tdb2/hash.c index 51874918..260c36f2 100644 --- a/ccan/tdb2/hash.c +++ b/ccan/tdb2/hash.c @@ -1,7 +1,7 @@ - /* + /* Trivial Database 2: hash handling Copyright (C) Rusty Russell 2010 - + This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either @@ -42,19 +42,25 @@ uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len) uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off) { - struct tdb_used_record pad, *r; + const struct tdb_used_record *r; const void *key; uint64_t klen, hash; - r = tdb_get(tdb, off, &pad, sizeof(pad)); - if (!r) + r = tdb_access_read(tdb, off, sizeof(*r), true); + if (TDB_PTR_IS_ERR(r)) { + tdb->ecode = TDB_PTR_ERR(r); /* FIXME */ return 0; + } klen = rec_key_length(r); - key = tdb_access_read(tdb, off + sizeof(pad), klen, false); - if (!key) + tdb_access_release(tdb, r); + + key = tdb_access_read(tdb, off + sizeof(*r), klen, false); + if (TDB_PTR_IS_ERR(key)) { + tdb->ecode = TDB_PTR_ERR(key); return 0; + } hash = tdb_hash(tdb, key, klen); tdb_access_release(tdb, key); @@ -62,7 +68,7 @@ uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off) } /* Get bits from a value. */ -static uint32_t bits(uint64_t val, unsigned start, unsigned num) +static uint32_t bits_from(uint64_t val, unsigned start, unsigned num) { assert(num <= 32); return (val >> start) & ((1U << num) - 1); @@ -73,7 +79,33 @@ static uint32_t bits(uint64_t val, unsigned start, unsigned num) static uint32_t use_bits(struct hash_info *h, unsigned num) { h->hash_used += num; - return bits(h->h, 64 - h->hash_used, num); + return bits_from(h->h, 64 - h->hash_used, num); +} + +static bool key_matches(struct tdb_context *tdb, + const struct tdb_used_record *rec, + tdb_off_t off, + const struct tdb_data *key) +{ + bool ret = false; + const char *rkey; + + if (rec_key_length(rec) != key->dsize) { + add_stat(tdb, compare_wrong_keylen, 1); + return ret; + } + + rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false); + if (TDB_PTR_IS_ERR(rkey)) { + tdb->ecode = TDB_PTR_ERR(rkey); + return ret; + } + if (memcmp(rkey, key->dptr, key->dsize) == 0) + ret = true; + else + add_stat(tdb, compare_wrong_keycmp, 1); + tdb_access_release(tdb, rkey); + return ret; } /* Does entry match? */ @@ -83,38 +115,37 @@ static bool match(struct tdb_context *tdb, tdb_off_t val, struct tdb_used_record *rec) { - bool ret; - const unsigned char *rkey; tdb_off_t off; + enum TDB_ERROR ecode; - /* FIXME: Handle hash value truncated. */ - if (bits(val, TDB_OFF_HASH_TRUNCATED_BIT, 1)) - abort(); - + add_stat(tdb, compares, 1); /* Desired bucket must match. */ - if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK)) + if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK)) { + add_stat(tdb, compare_wrong_bucket, 1); return false; + } /* Top bits of offset == next bits of hash. 
 	 */
-	if (bits(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
-	    != bits(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
-		    TDB_OFF_UPPER_STEAL_EXTRA))
+	if (bits_from(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
+	    != bits_from(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
+			 TDB_OFF_UPPER_STEAL_EXTRA)) {
+		add_stat(tdb, compare_wrong_offsetbits, 1);
 		return false;
+	}
 
 	off = val & TDB_OFF_MASK;
-	if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
+	ecode = tdb_read_convert(tdb, off, rec, sizeof(*rec));
+	if (ecode != TDB_SUCCESS) {
+		tdb->ecode = ecode;
 		return false;
+	}
 
-	/* FIXME: check extra bits in header? */
-	if (rec_key_length(rec) != key->dsize)
+	if ((h->h & ((1 << 11)-1)) != rec_hash(rec)) {
+		add_stat(tdb, compare_wrong_rechash, 1);
 		return false;
+	}
 
-	rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
-	if (!rkey)
-		return false;
-	ret = (memcmp(rkey, key->dptr, key->dsize) == 0);
-	tdb_access_release(tdb, rkey);
-	return ret;
+	return key_matches(tdb, rec, off, key);
 }
 
 static tdb_off_t hbucket_off(tdb_off_t group_start, unsigned bucket)
@@ -123,10 +154,9 @@ static tdb_off_t hbucket_off(tdb_off_t group_start, unsigned bucket)
 	       + (bucket % (1 << TDB_HASH_GROUP_BITS)) * sizeof(tdb_off_t);
 }
 
-/* Truncated hashes can't be all 1: that's how we spot a sub-hash */
 bool is_subhash(tdb_off_t val)
 {
-	return val >> (64-TDB_OFF_UPPER_STEAL) == (1<<TDB_OFF_UPPER_STEAL)-1;
+	return (val >> TDB_OFF_UPPER_STEAL_SUBHASH_BIT) & 1;
 }
 
 /* FIXME: Guess the depth, don't over-lock! */
@@ -136,6 +166,73 @@ static tdb_off_t hlock_range(tdb_off_t group, tdb_off_t *size)
 {
 	return group << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
 }
 
+static tdb_off_t COLD find_in_chain(struct tdb_context *tdb,
+				    struct tdb_data key,
+				    tdb_off_t chain,
+				    struct hash_info *h,
+				    struct tdb_used_record *rec,
+				    struct traverse_info *tinfo)
+{
+	tdb_off_t off, next;
+	enum TDB_ERROR ecode;
+
+	/* In case nothing is free, we set these to zero. */
+	h->home_bucket = h->found_bucket = 0;
+
+	for (off = chain; off; off = next) {
+		unsigned int i;
+
+		h->group_start = off;
+		ecode = tdb_read_convert(tdb, off, h->group, sizeof(h->group));
+		if (ecode != TDB_SUCCESS) {
+			tdb->ecode = ecode;
+			return TDB_OFF_ERR;
+		}
+
+		for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
+			tdb_off_t recoff;
+			if (!h->group[i]) {
+				/* Remember this empty bucket. */
+				h->home_bucket = h->found_bucket = i;
+				continue;
+			}
+
+			/* We can insert extra bits via add_to_hash
+			 * empty bucket logic. */
+			recoff = h->group[i] & TDB_OFF_MASK;
+			ecode = tdb_read_convert(tdb, recoff, rec,
						 sizeof(*rec));
+			if (ecode != TDB_SUCCESS) {
+				tdb->ecode = ecode;
+				return TDB_OFF_ERR;
+			}
+
+			if (key_matches(tdb, rec, recoff, &key)) {
+				h->home_bucket = h->found_bucket = i;
+
+				if (tinfo) {
+					tinfo->levels[tinfo->num_levels]
+						.hashtable = off;
+					tinfo->levels[tinfo->num_levels]
+						.total_buckets
+						= 1 << TDB_HASH_GROUP_BITS;
+					tinfo->levels[tinfo->num_levels].entry
+						= i;
+					tinfo->num_levels++;
+				}
+				return recoff;
+			}
+		}
+		next = tdb_read_off(tdb, off
+				    + offsetof(struct tdb_chain, next));
+		if (next == TDB_OFF_ERR)
+			return TDB_OFF_ERR;
+		if (next)
+			next += sizeof(struct tdb_used_record);
+	}
+	return 0;
+}
+
 /* This is the core routine which searches the hashtable for an entry.
  * On error, no locks are held and TDB_OFF_ERR is returned.
  * Otherwise, hinfo is filled in (and the optional tinfo).
@@ -150,6 +247,7 @@ tdb_off_t find_and_lock(struct tdb_context *tdb, { uint32_t i, group; tdb_off_t hashtable; + enum TDB_ERROR ecode; h->h = tdb_hash(tdb, key.dptr, key.dsize); h->hash_used = 0; @@ -157,28 +255,34 @@ tdb_off_t find_and_lock(struct tdb_context *tdb, h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS); h->hlock_start = hlock_range(group, &h->hlock_range); - if (tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype, - TDB_LOCK_WAIT)) + ecode = tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype, + TDB_LOCK_WAIT); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; return TDB_OFF_ERR; + } hashtable = offsetof(struct tdb_header, hashtable); if (tinfo) { tinfo->toplevel_group = group; tinfo->num_levels = 1; tinfo->levels[0].entry = 0; - tinfo->levels[0].hashtable = hashtable + tinfo->levels[0].hashtable = hashtable + (group << TDB_HASH_GROUP_BITS) * sizeof(tdb_off_t); tinfo->levels[0].total_buckets = 1 << TDB_HASH_GROUP_BITS; } - while (likely(h->hash_used < 64)) { + while (h->hash_used <= 64) { /* Read in the hash group. */ h->group_start = hashtable + group * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS); - if (tdb_read_convert(tdb, h->group_start, &h->group, - sizeof(h->group)) == -1) + ecode = tdb_read_convert(tdb, h->group_start, &h->group, + sizeof(h->group)); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; goto fail; + } /* Pointer to another hash table? Go down... */ if (is_subhash(h->group[h->home_bucket])) { @@ -228,8 +332,7 @@ tdb_off_t find_and_lock(struct tdb_context *tdb, return 0; } - /* FIXME: We hit the bottom. Chain! */ - abort(); + return find_in_chain(tdb, key, hashtable, h, rec, tinfo); fail: tdb_unlock_hashes(tdb, h->hlock_start, h->hlock_range, ltype); @@ -239,8 +342,8 @@ fail: /* I wrote a simple test, expanding a hash to 2GB, for the following * cases: * 1) Expanding all the buckets at once, - * 2) Expanding the most-populated bucket, - * 3) Expanding the bucket we wanted to place the new entry ito. + * 2) Expanding the bucket we wanted to place the new entry into. + * 3) Expanding the most-populated bucket, * * I measured the worst/average/best density during this process. * 1) 3%/16%/30% @@ -300,19 +403,75 @@ static tdb_off_t encode_offset(tdb_off_t new_off, struct hash_info *h) { return h->home_bucket | new_off - | ((uint64_t)bits(h->h, + | ((uint64_t)bits_from(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA, TDB_OFF_UPPER_STEAL_EXTRA) << TDB_OFF_HASH_EXTRA_BIT); } -/* Simply overwrite the hash entry we found before. */ +/* Simply overwrite the hash entry we found before. */ int replace_in_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off) { - return tdb_write_off(tdb, hbucket_off(h->group_start, h->found_bucket), - encode_offset(new_off, h)); + enum TDB_ERROR ecode; + + ecode = tdb_write_off(tdb, hbucket_off(h->group_start, h->found_bucket), + encode_offset(new_off, h)); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; + return -1; + } + return 0; +} + +/* We slot in anywhere that's empty in the chain. 
*/ +static int COLD add_to_chain(struct tdb_context *tdb, + tdb_off_t subhash, + tdb_off_t new_off) +{ + size_t entry = tdb_find_zero_off(tdb, subhash, 1<ecode = ecode; + return -1; + } + ecode = tdb_write_off(tdb, subhash + + offsetof(struct tdb_chain, + next), + next); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; + return -1; + } + } + return add_to_chain(tdb, next, new_off); + } + + ecode = tdb_write_off(tdb, subhash + entry * sizeof(tdb_off_t), + new_off); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; + return -1; + } + return 0; } /* Add into a newly created subhash. */ @@ -322,45 +481,65 @@ static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash, tdb_off_t off = (val & TDB_OFF_MASK), *group; struct hash_info h; unsigned int gnum; + enum TDB_ERROR ecode; h.hash_used = hash_used; - /* FIXME chain if hash_used == 64 */ if (hash_used + TDB_SUBLEVEL_HASH_BITS > 64) - abort(); + return add_to_chain(tdb, subhash, off); - /* FIXME: Do truncated hash bits if we can! */ h.h = hash_record(tdb, off); gnum = use_bits(&h, TDB_SUBLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS); - h.group_start = subhash + sizeof(struct tdb_used_record) + h.group_start = subhash + gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS); h.home_bucket = use_bits(&h, TDB_HASH_GROUP_BITS); group = tdb_access_write(tdb, h.group_start, sizeof(*group) << TDB_HASH_GROUP_BITS, true); - if (!group) + if (TDB_PTR_IS_ERR(group)) { + tdb->ecode = TDB_PTR_ERR(group); return -1; + } force_into_group(group, h.home_bucket, encode_offset(off, &h)); - return tdb_access_commit(tdb, group); + ecode = tdb_access_commit(tdb, group); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; + return -1; + } + return 0; } static int expand_group(struct tdb_context *tdb, struct hash_info *h) { - unsigned bucket, num_vals, i; + unsigned bucket, num_vals, i, magic; + size_t subsize; tdb_off_t subhash; tdb_off_t vals[1 << TDB_HASH_GROUP_BITS]; + enum TDB_ERROR ecode; /* Attach new empty subhash under fullest bucket. */ bucket = fullest_bucket(tdb, h->group, h->home_bucket); - subhash = alloc(tdb, 0, sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS, - 0, false); + if (h->hash_used == 64) { + add_stat(tdb, alloc_chain, 1); + subsize = sizeof(struct tdb_chain); + magic = TDB_CHAIN_MAGIC; + } else { + add_stat(tdb, alloc_subhash, 1); + subsize = (sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS); + magic = TDB_HTABLE_MAGIC; + } + + subhash = alloc(tdb, 0, subsize, 0, magic, false); if (subhash == TDB_OFF_ERR) return -1; - if (zero_out(tdb, subhash + sizeof(struct tdb_used_record), - sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS) == -1) + ecode = zero_out(tdb, subhash + sizeof(struct tdb_used_record), + subsize); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; return -1; + } /* Remove any which are destined for bucket or are in wrong place. */ num_vals = 0; @@ -377,7 +556,10 @@ static int expand_group(struct tdb_context *tdb, struct hash_info *h) /* assert(num_vals); */ /* Overwrite expanded bucket with subhash pointer. */ - h->group[bucket] = subhash | ~((1ULL << (64 - TDB_OFF_UPPER_STEAL))-1); + h->group[bucket] = subhash | (1ULL << TDB_OFF_UPPER_STEAL_SUBHASH_BIT); + + /* Point to actual contents of record. */ + subhash += sizeof(struct tdb_used_record); /* Put values back. 
*/ for (i = 0; i < num_vals; i++) { @@ -398,6 +580,7 @@ int delete_from_hash(struct tdb_context *tdb, struct hash_info *h) { unsigned int i, num_movers = 0; tdb_off_t movers[1 << TDB_HASH_GROUP_BITS]; + enum TDB_ERROR ecode; h->group[h->found_bucket] = 0; for (i = 1; i < (1 << TDB_HASH_GROUP_BITS); i++) { @@ -427,24 +610,35 @@ int delete_from_hash(struct tdb_context *tdb, struct hash_info *h) } /* Now we write back the hash group */ - return tdb_write_convert(tdb, h->group_start, - h->group, sizeof(h->group)); + ecode = tdb_write_convert(tdb, h->group_start, + h->group, sizeof(h->group)); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; + return -1; + } + return 0; } int add_to_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off) { - /* FIXME: chain! */ - if (h->hash_used >= 64) - abort(); + enum TDB_ERROR ecode; /* We hit an empty bucket during search? That's where it goes. */ if (!h->group[h->found_bucket]) { h->group[h->found_bucket] = encode_offset(new_off, h); /* Write back the modified group. */ - return tdb_write_convert(tdb, h->group_start, - h->group, sizeof(h->group)); + ecode = tdb_write_convert(tdb, h->group_start, + h->group, sizeof(h->group)); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; + return -1; + } + return 0; } + if (h->hash_used > 64) + return add_to_chain(tdb, h->group_start, new_off); + /* We're full. Expand. */ if (expand_group(tdb, h) == -1) return -1; @@ -455,9 +649,12 @@ int add_to_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off) unsigned int gnum; /* Write back the modified group. */ - if (tdb_write_convert(tdb, h->group_start, h->group, - sizeof(h->group))) + ecode = tdb_write_convert(tdb, h->group_start, h->group, + sizeof(h->group)); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; return -1; + } /* Move hashinfo down a level. */ hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK) @@ -466,16 +663,25 @@ int add_to_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off) h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS); h->group_start = hashtable + gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS); - if (tdb_read_convert(tdb, h->group_start, &h->group, - sizeof(h->group)) == -1) + ecode = tdb_read_convert(tdb, h->group_start, &h->group, + sizeof(h->group)); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; return -1; + } } /* Expanding the group must have made room if it didn't choose this * bucket. */ - if (put_into_group(h->group, h->home_bucket, encode_offset(new_off, h))) - return tdb_write_convert(tdb, h->group_start, - h->group, sizeof(h->group)); + if (put_into_group(h->group, h->home_bucket, encode_offset(new_off,h))){ + ecode = tdb_write_convert(tdb, h->group_start, + h->group, sizeof(h->group)); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; + return -1; + } + return 0; + } /* This can happen if all hashes in group (and us) dropped into same * group in subhash. */ @@ -523,7 +729,11 @@ again: tlevel++; tlevel->hashtable = off + sizeof(struct tdb_used_record); tlevel->entry = 0; - tlevel->total_buckets = (1 << TDB_SUBLEVEL_HASH_BITS); + /* Next level is a chain? */ + if (unlikely(tinfo->num_levels == TDB_MAX_LEVELS + 1)) + tlevel->total_buckets = (1 << TDB_HASH_GROUP_BITS); + else + tlevel->total_buckets = (1 << TDB_SUBLEVEL_HASH_BITS); goto again; } @@ -531,6 +741,20 @@ again: if (tinfo->num_levels == 1) return 0; + /* Handle chained entries. 
*/ + if (unlikely(tinfo->num_levels == TDB_MAX_LEVELS + 1)) { + tlevel->hashtable = tdb_read_off(tdb, tlevel->hashtable + + offsetof(struct tdb_chain, + next)); + if (tlevel->hashtable == TDB_OFF_ERR) + return TDB_OFF_ERR; + if (tlevel->hashtable) { + tlevel->hashtable += sizeof(struct tdb_used_record); + tlevel->entry = 0; + goto again; + } + } + /* Go back up and keep searching. */ tinfo->num_levels--; tlevel--; @@ -538,36 +762,42 @@ again: } /* Return 1 if we find something, 0 if not, -1 on error. */ -int next_in_hash(struct tdb_context *tdb, int ltype, +int next_in_hash(struct tdb_context *tdb, struct traverse_info *tinfo, TDB_DATA *kbuf, size_t *dlen) { const unsigned group_bits = TDB_TOPLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS; - tdb_off_t hlock_start, hlock_range, off; + tdb_off_t hl_start, hl_range, off; + enum TDB_ERROR ecode; while (tinfo->toplevel_group < (1 << group_bits)) { - hlock_start = (tdb_off_t)tinfo->toplevel_group + hl_start = (tdb_off_t)tinfo->toplevel_group << (64 - group_bits); - hlock_range = 1ULL << group_bits; - if (tdb_lock_hashes(tdb, hlock_start, hlock_range, ltype, - TDB_LOCK_WAIT) != 0) + hl_range = 1ULL << group_bits; + ecode = tdb_lock_hashes(tdb, hl_start, hl_range, F_RDLCK, + TDB_LOCK_WAIT); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; return -1; + } off = iterate_hash(tdb, tinfo); if (off) { struct tdb_used_record rec; - if (tdb_read_convert(tdb, off, &rec, sizeof(rec))) { + ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec)); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; tdb_unlock_hashes(tdb, - hlock_start, hlock_range, - ltype); + hl_start, hl_range, F_RDLCK); return -1; } - if (rec_magic(&rec) != TDB_MAGIC) { - tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv, - "next_in_hash:" - " corrupt record at %llu\n", - (long long)off); + if (rec_magic(&rec) != TDB_USED_MAGIC) { + tdb_logerr(tdb, TDB_ERR_CORRUPT, + TDB_LOG_ERROR, + "next_in_hash:" + " corrupt record at %llu", + (long long)off); return -1; } @@ -576,20 +806,24 @@ int next_in_hash(struct tdb_context *tdb, int ltype, /* They want data as well? */ if (dlen) { *dlen = rec_data_length(&rec); - kbuf->dptr = tdb_alloc_read(tdb, + kbuf->dptr = tdb_alloc_read(tdb, off + sizeof(rec), kbuf->dsize + *dlen); } else { - kbuf->dptr = tdb_alloc_read(tdb, + kbuf->dptr = tdb_alloc_read(tdb, off + sizeof(rec), kbuf->dsize); } - tdb_unlock_hashes(tdb, hlock_start, hlock_range, ltype); - return kbuf->dptr ? 1 : -1; + tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK); + if (TDB_PTR_IS_ERR(kbuf->dptr)) { + tdb->ecode = TDB_PTR_ERR(kbuf->dptr); + return -1; + } + return 1; } - tdb_unlock_hashes(tdb, hlock_start, hlock_range, ltype); + tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK); tinfo->toplevel_group++; tinfo->levels[0].hashtable @@ -600,7 +834,7 @@ int next_in_hash(struct tdb_context *tdb, int ltype, } /* Return 1 if we find something, 0 if not, -1 on error. 
*/ -int first_in_hash(struct tdb_context *tdb, int ltype, +int first_in_hash(struct tdb_context *tdb, struct traverse_info *tinfo, TDB_DATA *kbuf, size_t *dlen) { @@ -611,7 +845,7 @@ int first_in_hash(struct tdb_context *tdb, int ltype, tinfo->levels[0].entry = 0; tinfo->levels[0].total_buckets = (1 << TDB_HASH_GROUP_BITS); - return next_in_hash(tdb, ltype, tinfo, kbuf, dlen); + return next_in_hash(tdb, tinfo, kbuf, dlen); } /* Even if the entry isn't in this hash bucket, you'd have to lock this @@ -620,19 +854,23 @@ static int chainlock(struct tdb_context *tdb, const TDB_DATA *key, int ltype, enum tdb_lock_flags waitflag, const char *func) { - int ret; + enum TDB_ERROR ecode; uint64_t h = tdb_hash(tdb, key->dptr, key->dsize); tdb_off_t lockstart, locksize; unsigned int group, gbits; gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS; - group = bits(h, 64 - gbits, gbits); + group = bits_from(h, 64 - gbits, gbits); lockstart = hlock_range(group, &locksize); - ret = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag); + ecode = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag); tdb_trace_1rec(tdb, func, *key); - return ret; + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; + return -1; + } + return 0; } /* lock/unlock one hash chain. This is meant to be used to reduce @@ -647,12 +885,18 @@ int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key) uint64_t h = tdb_hash(tdb, key.dptr, key.dsize); tdb_off_t lockstart, locksize; unsigned int group, gbits; + enum TDB_ERROR ecode; gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS; - group = bits(h, 64 - gbits, gbits); + group = bits_from(h, 64 - gbits, gbits); lockstart = hlock_range(group, &locksize); tdb_trace_1rec(tdb, "tdb_chainunlock", key); - return tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK); + ecode = tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; + return -1; + } + return 0; }
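
A minimal stand-alone sketch (not part of the patch) of the hash-bit bookkeeping the change above keeps relying on. The bodies of bits_from() and use_bits() are copied from the patch; TOPLEVEL_HASH_BITS and HASH_GROUP_BITS are illustrative stand-ins for the real TDB_TOPLEVEL_HASH_BITS and TDB_HASH_GROUP_BITS, and the struct is a pared-down hash_info. main() simply mirrors how find_and_lock() peels the top-level group index and then the home bucket off the top of the 64-bit hash.

/* Illustrative sketch only; not part of the patch above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TOPLEVEL_HASH_BITS 10	/* stand-in for TDB_TOPLEVEL_HASH_BITS */
#define HASH_GROUP_BITS 3	/* stand-in for TDB_HASH_GROUP_BITS */

struct hash_info {
	uint64_t h;		/* 64-bit hash of the key */
	unsigned hash_used;	/* how many top bits have been consumed */
};

/* Get bits from a value (body identical to bits_from() in the patch). */
static uint32_t bits_from(uint64_t val, unsigned start, unsigned num)
{
	assert(num <= 32);
	return (val >> start) & ((1U << num) - 1);
}

/* Consume the next 'num' bits from the top of the hash
 * (body identical to use_bits() in the patch). */
static uint32_t use_bits(struct hash_info *h, unsigned num)
{
	h->hash_used += num;
	return bits_from(h->h, 64 - h->hash_used, num);
}

int main(void)
{
	struct hash_info h = { 0xdeadbeefcafef00dULL, 0 };
	uint32_t group, bucket;

	/* Mirrors the start of find_and_lock(): first the top-level group
	 * index, then the home bucket within that group. */
	group = use_bits(&h, TOPLEVEL_HASH_BITS - HASH_GROUP_BITS);
	bucket = use_bits(&h, HASH_GROUP_BITS);

	printf("group=%u bucket=%u bits used so far=%u\n",
	       (unsigned)group, (unsigned)bucket, h.hash_used);
	return 0;
}

Because bits are taken from the top of the hash rather than the bottom, a top-level group number maps onto a contiguous slice of the 64-bit hash space, which is what lets hlock_range() (and the chainlock helpers) lock a whole group with a single range lock.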