X-Git-Url: http://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Ftdb2%2Fhash.c;h=afa5b745717bdb234c84320ca8fed4358bd80973;hp=18f6b1720eb662b2a0f140a6d23f7c844269b223;hb=74c15d72a2ddc484c771bc226134673409e9a40f;hpb=2491b65a6d10cd6ca1a3e05bf535eb0180047922

diff --git a/ccan/tdb2/hash.c b/ccan/tdb2/hash.c
index 18f6b172..afa5b745 100644
--- a/ccan/tdb2/hash.c
+++ b/ccan/tdb2/hash.c
@@ -1,7 +1,7 @@
- /* 
+ /*
   Trivial Database 2: hash handling
   Copyright (C) Rusty Russell 2010
-  
+
   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
@@ -64,7 +64,7 @@ uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
 }
 
 /* Get bits from a value. */
-static uint32_t bits(uint64_t val, unsigned start, unsigned num)
+static uint32_t bits_from(uint64_t val, unsigned start, unsigned num)
 {
 	assert(num <= 32);
 	return (val >> start) & ((1U << num) - 1);
@@ -75,7 +75,7 @@ static uint32_t bits(uint64_t val, unsigned start, unsigned num)
 static uint32_t use_bits(struct hash_info *h, unsigned num)
 {
 	h->hash_used += num;
-	return bits(h->h, 64 - h->hash_used, num);
+	return bits_from(h->h, 64 - h->hash_used, num);
 }
 
 static bool key_matches(struct tdb_context *tdb,
@@ -119,8 +119,8 @@ static bool match(struct tdb_context *tdb,
 	}
 
 	/* Top bits of offset == next bits of hash. */
-	if (bits(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
-	    != bits(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
+	if (bits_from(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
+	    != bits_from(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
 		    TDB_OFF_UPPER_STEAL_EXTRA)) {
 		add_stat(tdb, compare_wrong_offsetbits, 1);
 		return false;
@@ -245,7 +245,7 @@ tdb_off_t find_and_lock(struct tdb_context *tdb,
 		tinfo->toplevel_group = group;
 		tinfo->num_levels = 1;
 		tinfo->levels[0].entry = 0;
-		tinfo->levels[0].hashtable = hashtable 
+		tinfo->levels[0].hashtable = hashtable
 			+ (group << TDB_HASH_GROUP_BITS) * sizeof(tdb_off_t);
 		tinfo->levels[0].total_buckets = 1 << TDB_HASH_GROUP_BITS;
 	}
@@ -378,13 +378,13 @@ static tdb_off_t encode_offset(tdb_off_t new_off, struct hash_info *h)
 {
 	return h->home_bucket
 		| new_off
-		| ((uint64_t)bits(h->h,
+		| ((uint64_t)bits_from(h->h,
 				  64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
 				  TDB_OFF_UPPER_STEAL_EXTRA)
 		   << TDB_OFF_HASH_EXTRA_BIT);
 }
 
-/* Simply overwrite the hash entry we found before. */ 
+/* Simply overwrite the hash entry we found before. */
 int replace_in_hash(struct tdb_context *tdb,
 		    struct hash_info *h,
 		    tdb_off_t new_off)
@@ -678,18 +678,18 @@ again:
 }
 
 /* Return 1 if we find something, 0 if not, -1 on error. */
-int next_in_hash(struct tdb_context *tdb, int ltype,
+int next_in_hash(struct tdb_context *tdb,
 		 struct traverse_info *tinfo,
 		 TDB_DATA *kbuf, size_t *dlen)
 {
 	const unsigned group_bits = TDB_TOPLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS;
-	tdb_off_t hlock_start, hlock_range, off;
+	tdb_off_t hl_start, hl_range, off;
 
 	while (tinfo->toplevel_group < (1 << group_bits)) {
-		hlock_start = (tdb_off_t)tinfo->toplevel_group
+		hl_start = (tdb_off_t)tinfo->toplevel_group
 			<< (64 - group_bits);
-		hlock_range = 1ULL << group_bits;
-		if (tdb_lock_hashes(tdb, hlock_start, hlock_range, ltype,
+		hl_range = 1ULL << group_bits;
+		if (tdb_lock_hashes(tdb, hl_start, hl_range, F_RDLCK,
 				    TDB_LOCK_WAIT) != 0)
 			return -1;
 
@@ -699,13 +699,12 @@ int next_in_hash(struct tdb_context *tdb, int ltype,
 
 			if (tdb_read_convert(tdb, off, &rec, sizeof(rec))) {
 				tdb_unlock_hashes(tdb,
-						  hlock_start, hlock_range,
-						  ltype);
+						  hl_start, hl_range, F_RDLCK);
 				return -1;
 			}
 			if (rec_magic(&rec) != TDB_USED_MAGIC) {
 				tdb_logerr(tdb, TDB_ERR_CORRUPT,
-					   TDB_DEBUG_FATAL,
+					   TDB_LOG_ERROR,
 					   "next_in_hash:"
 					   " corrupt record at %llu",
 					   (long long)off);
@@ -717,20 +716,20 @@ int next_in_hash(struct tdb_context *tdb, int ltype,
 			/* They want data as well? */
 			if (dlen) {
 				*dlen = rec_data_length(&rec);
-				kbuf->dptr = tdb_alloc_read(tdb, 
+				kbuf->dptr = tdb_alloc_read(tdb,
 							    off + sizeof(rec),
 							    kbuf->dsize + *dlen);
 			} else {
-				kbuf->dptr = tdb_alloc_read(tdb, 
+				kbuf->dptr = tdb_alloc_read(tdb,
 							    off + sizeof(rec),
 							    kbuf->dsize);
 			}
-			tdb_unlock_hashes(tdb, hlock_start, hlock_range, ltype);
+			tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK);
 			return kbuf->dptr ? 1 : -1;
 		}
 
-		tdb_unlock_hashes(tdb, hlock_start, hlock_range, ltype);
+		tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK);
 
 		tinfo->toplevel_group++;
 		tinfo->levels[0].hashtable
@@ -741,7 +740,7 @@ int next_in_hash(struct tdb_context *tdb, int ltype,
 }
 
 /* Return 1 if we find something, 0 if not, -1 on error. */
-int first_in_hash(struct tdb_context *tdb, int ltype,
+int first_in_hash(struct tdb_context *tdb,
 		  struct traverse_info *tinfo,
 		  TDB_DATA *kbuf, size_t *dlen)
 {
@@ -752,7 +751,7 @@ int first_in_hash(struct tdb_context *tdb, int ltype,
 	tinfo->levels[0].entry = 0;
 	tinfo->levels[0].total_buckets = (1 << TDB_HASH_GROUP_BITS);
 
-	return next_in_hash(tdb, ltype, tinfo, kbuf, dlen);
+	return next_in_hash(tdb, tinfo, kbuf, dlen);
 }
 
 /* Even if the entry isn't in this hash bucket, you'd have to lock this
@@ -767,7 +766,7 @@ static int chainlock(struct tdb_context *tdb, const TDB_DATA *key,
 	unsigned int group, gbits;
 
 	gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
-	group = bits(h, 64 - gbits, gbits);
+	group = bits_from(h, 64 - gbits, gbits);
 
 	lockstart = hlock_range(group, &locksize);
 
@@ -790,7 +789,7 @@ int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
 	unsigned int group, gbits;
 
 	gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
-	group = bits(h, 64 - gbits, gbits);
+	group = bits_from(h, 64 - gbits, gbits);
 
 	lockstart = hlock_range(group, &locksize);
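
A minimal, standalone sketch (not part of the patch) of the bits_from()/use_bits() idiom the diff renames: the 64-bit hash is consumed from its most-significant end, a few bits at a time, as lookup descends from the top-level groups into deeper hash levels. The struct hash_info below is a simplified stand-in for the real tdb2 structure, and main() with its example hash value is purely illustrative.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for tdb2's struct hash_info (illustration only). */
struct hash_info {
	uint64_t h;		/* 64-bit hash of the key */
	unsigned hash_used;	/* bits of h consumed so far, from the top */
};

/* Get bits from a value (same body as the renamed bits_from() above). */
static uint32_t bits_from(uint64_t val, unsigned start, unsigned num)
{
	assert(num <= 32);
	return (val >> start) & ((1U << num) - 1);
}

/* Consume the next num bits of the hash, most-significant bits first. */
static uint32_t use_bits(struct hash_info *h, unsigned num)
{
	h->hash_used += num;
	return bits_from(h->h, 64 - h->hash_used, num);
}

int main(void)
{
	struct hash_info h = { .h = 0xdeadbeefcafef00dULL, .hash_used = 0 };

	/* Top-level group bits first, then further bits for deeper levels. */
	printf("first 10 bits: 0x%x\n", use_bits(&h, 10));
	printf("next 6 bits:   0x%x\n", use_bits(&h, 6));
	return 0;
}

Running this prints the top 10 bits of the example hash and then the following 6, mirroring how use_bits() advances hash_used on each call.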