X-Git-Url: https://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Ftdb2%2Fhash.c;h=260c36f266f0dc8dccaa331993153fe69d6c9a64;hp=afa5b745717bdb234c84320ca8fed4358bd80973;hb=1d4d21dfb5ac43274afc125f132d196ce07f3177;hpb=b24f8e2ae5ac22fc2e5dbfebebf9c5fa2f338588

diff --git a/ccan/tdb2/hash.c b/ccan/tdb2/hash.c
index afa5b745..260c36f2 100644
--- a/ccan/tdb2/hash.c
+++ b/ccan/tdb2/hash.c
@@ -47,16 +47,20 @@ uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
 	const struct tdb_used_record *r;
 	const void *key;
 	uint64_t klen, hash;
 
 	r = tdb_access_read(tdb, off, sizeof(*r), true);
-	if (!r)
+	if (TDB_PTR_IS_ERR(r)) {
+		tdb->ecode = TDB_PTR_ERR(r); /* FIXME */
 		return 0;
+	}
 	klen = rec_key_length(r);
 	tdb_access_release(tdb, r);
 
 	key = tdb_access_read(tdb, off + sizeof(*r), klen, false);
-	if (!key)
+	if (TDB_PTR_IS_ERR(key)) {
+		tdb->ecode = TDB_PTR_ERR(key);
 		return 0;
+	}
 
 	hash = tdb_hash(tdb, key, klen);
 	tdb_access_release(tdb, key);
@@ -92,8 +96,10 @@ static bool key_matches(struct tdb_context *tdb,
 	}
 
 	rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
-	if (!rkey)
+	if (TDB_PTR_IS_ERR(rkey)) {
+		tdb->ecode = TDB_PTR_ERR(rkey);
 		return ret;
+	}
 	if (memcmp(rkey, key->dptr, key->dsize) == 0)
 		ret = true;
 	else
@@ -110,6 +116,7 @@ static bool match(struct tdb_context *tdb,
 		  struct tdb_used_record *rec)
 {
 	tdb_off_t off;
+	enum TDB_ERROR ecode;
 
 	add_stat(tdb, compares, 1);
 	/* Desired bucket must match. */
@@ -127,8 +134,11 @@ static bool match(struct tdb_context *tdb,
 	}
 
 	off = val & TDB_OFF_MASK;
-	if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
+	ecode = tdb_read_convert(tdb, off, rec, sizeof(*rec));
+	if (ecode != TDB_SUCCESS) {
+		tdb->ecode = ecode;
 		return false;
+	}
 
 	if ((h->h & ((1 << 11)-1)) != rec_hash(rec)) {
 		add_stat(tdb, compare_wrong_rechash, 1);
@@ -164,6 +174,7 @@ static tdb_off_t COLD find_in_chain(struct tdb_context *tdb,
 				    struct traverse_info *tinfo)
 {
 	tdb_off_t off, next;
+	enum TDB_ERROR ecode;
 
 	/* In case nothing is free, we set these to zero. */
 	h->home_bucket = h->found_bucket = 0;
@@ -172,8 +183,11 @@ static tdb_off_t COLD find_in_chain(struct tdb_context *tdb,
 		unsigned int i;
 
 		h->group_start = off;
-		if (tdb_read_convert(tdb, off, h->group, sizeof(h->group)))
+		ecode = tdb_read_convert(tdb, off, h->group, sizeof(h->group));
+		if (ecode != TDB_SUCCESS) {
+			tdb->ecode = ecode;
 			return TDB_OFF_ERR;
+		}
 
 		for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
 			tdb_off_t recoff;
@@ -186,8 +200,12 @@ static tdb_off_t COLD find_in_chain(struct tdb_context *tdb,
 			/* We can insert extra bits via add_to_hash
 			 * empty bucket logic. */
 			recoff = h->group[i] & TDB_OFF_MASK;
-			if (tdb_read_convert(tdb, recoff, rec, sizeof(*rec)))
+			ecode = tdb_read_convert(tdb, recoff, rec,
+						 sizeof(*rec));
+			if (ecode != TDB_SUCCESS) {
+				tdb->ecode = ecode;
 				return TDB_OFF_ERR;
+			}
 
 			if (key_matches(tdb, rec, recoff, &key)) {
 				h->home_bucket = h->found_bucket = i;
@@ -229,6 +247,7 @@ tdb_off_t find_and_lock(struct tdb_context *tdb,
 {
 	uint32_t i, group;
 	tdb_off_t hashtable;
+	enum TDB_ERROR ecode;
 
 	h->h = tdb_hash(tdb, key.dptr, key.dsize);
 	h->hash_used = 0;
@@ -236,9 +255,12 @@ tdb_off_t find_and_lock(struct tdb_context *tdb,
 	h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
 
 	h->hlock_start = hlock_range(group, &h->hlock_range);
-	if (tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
-			    TDB_LOCK_WAIT))
+	ecode = tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
+				TDB_LOCK_WAIT);
+	if (ecode != TDB_SUCCESS) {
+		tdb->ecode = ecode;
 		return TDB_OFF_ERR;
+	}
 
 	hashtable = offsetof(struct tdb_header, hashtable);
 	if (tinfo) {
@@ -255,9 +277,12 @@ tdb_off_t find_and_lock(struct tdb_context *tdb,
 
 		h->group_start = hashtable
 			+ group * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
-		if (tdb_read_convert(tdb, h->group_start, &h->group,
-				     sizeof(h->group)) == -1)
+		ecode = tdb_read_convert(tdb, h->group_start, &h->group,
+					 sizeof(h->group));
+		if (ecode != TDB_SUCCESS) {
+			tdb->ecode = ecode;
 			goto fail;
+		}
 
 		/* Pointer to another hash table? Go down... */
 		if (is_subhash(h->group[h->home_bucket])) {
@@ -389,8 +414,15 @@ int replace_in_hash(struct tdb_context *tdb,
 		    struct hash_info *h,
 		    tdb_off_t new_off)
 {
-	return tdb_write_off(tdb, hbucket_off(h->group_start, h->found_bucket),
-			     encode_offset(new_off, h));
+	enum TDB_ERROR ecode;
+
+	ecode = tdb_write_off(tdb, hbucket_off(h->group_start, h->found_bucket),
+			      encode_offset(new_off, h));
+	if (ecode != TDB_SUCCESS) {
+		tdb->ecode = ecode;
+		return -1;
+	}
+	return 0;
 }
 
 /* We slot in anywhere that's empty in the chain. */
@@ -399,6 +431,7 @@ static int COLD add_to_chain(struct tdb_context *tdb,
 			     tdb_off_t new_off)
 {
 	size_t entry = tdb_find_zero_off(tdb, subhash, 1<<TDB_HASH_GROUP_BITS);
+	enum TDB_ERROR ecode;
 
 	if (entry == 1 << TDB_HASH_GROUP_BITS) {
 		tdb_off_t next;
@@ -413,19 +446,32 @@ static int COLD add_to_chain(struct tdb_context *tdb,
 				     TDB_CHAIN_MAGIC, false);
 			if (next == TDB_OFF_ERR)
 				return -1;
-			if (zero_out(tdb, next+sizeof(struct tdb_used_record),
-				     sizeof(struct tdb_chain)))
+			ecode = zero_out(tdb,
+					 next+sizeof(struct tdb_used_record),
+					 sizeof(struct tdb_chain));
+			if (ecode != TDB_SUCCESS) {
+				tdb->ecode = ecode;
 				return -1;
-			if (tdb_write_off(tdb, subhash
-					  + offsetof(struct tdb_chain, next),
-					  next) != 0)
+			}
+			ecode = tdb_write_off(tdb, subhash
+					      + offsetof(struct tdb_chain,
+							 next),
+					      next);
+			if (ecode != TDB_SUCCESS) {
+				tdb->ecode = ecode;
 				return -1;
+			}
 		}
 		return add_to_chain(tdb, next, new_off);
 	}
 
-	return tdb_write_off(tdb, subhash + entry * sizeof(tdb_off_t),
-			     new_off);
+	ecode = tdb_write_off(tdb, subhash + entry * sizeof(tdb_off_t),
+			      new_off);
+	if (ecode != TDB_SUCCESS) {
+		tdb->ecode = ecode;
+		return -1;
+	}
+	return 0;
 }
 
 /* Add into a newly created subhash. */
@@ -435,6 +481,7 @@ static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
 	tdb_off_t off = (val & TDB_OFF_MASK), *group;
 	struct hash_info h;
 	unsigned int gnum;
+	enum TDB_ERROR ecode;
 
 	h.hash_used = hash_used;
 
@@ -449,10 +496,17 @@ static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
 		+ gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
 	group = tdb_access_write(tdb, h.group_start,
 				 sizeof(*group) << TDB_HASH_GROUP_BITS, true);
-	if (!group)
+	if (TDB_PTR_IS_ERR(group)) {
+		tdb->ecode = TDB_PTR_ERR(group);
 		return -1;
+	}
 	force_into_group(group, h.home_bucket, encode_offset(off, &h));
-	return tdb_access_commit(tdb, group);
+	ecode = tdb_access_commit(tdb, group);
+	if (ecode != TDB_SUCCESS) {
+		tdb->ecode = ecode;
+		return -1;
+	}
+	return 0;
 }
 
 static int expand_group(struct tdb_context *tdb, struct hash_info *h)
@@ -461,6 +515,7 @@ static int expand_group(struct tdb_context *tdb, struct hash_info *h)
 	size_t subsize;
 	tdb_off_t subhash;
 	tdb_off_t vals[1 << TDB_HASH_GROUP_BITS];
+	enum TDB_ERROR ecode;
 
 	/* Attach new empty subhash under fullest bucket. */
 	bucket = fullest_bucket(tdb, h->group, h->home_bucket);
@@ -479,8 +534,12 @@ static int expand_group(struct tdb_context *tdb, struct hash_info *h)
 	if (subhash == TDB_OFF_ERR)
 		return -1;
 
-	if (zero_out(tdb, subhash + sizeof(struct tdb_used_record), subsize))
+	ecode = zero_out(tdb, subhash + sizeof(struct tdb_used_record),
+			 subsize);
+	if (ecode != TDB_SUCCESS) {
+		tdb->ecode = ecode;
 		return -1;
+	}
 
 	/* Remove any which are destined for bucket or are in wrong place. */
 	num_vals = 0;
@@ -521,6 +580,7 @@ int delete_from_hash(struct tdb_context *tdb, struct hash_info *h)
 {
 	unsigned int i, num_movers = 0;
 	tdb_off_t movers[1 << TDB_HASH_GROUP_BITS];
+	enum TDB_ERROR ecode;
 
 	h->group[h->found_bucket] = 0;
 	for (i = 1; i < (1 << TDB_HASH_GROUP_BITS); i++) {
@@ -550,18 +610,30 @@ int delete_from_hash(struct tdb_context *tdb, struct hash_info *h)
 	}
 
 	/* Now we write back the hash group */
-	return tdb_write_convert(tdb, h->group_start,
-				 h->group, sizeof(h->group));
+	ecode = tdb_write_convert(tdb, h->group_start,
+				  h->group, sizeof(h->group));
+	if (ecode != TDB_SUCCESS) {
+		tdb->ecode = ecode;
+		return -1;
+	}
+	return 0;
 }
 
 int add_to_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off)
 {
+	enum TDB_ERROR ecode;
+
 	/* We hit an empty bucket during search? That's where it goes. */
 	if (!h->group[h->found_bucket]) {
 		h->group[h->found_bucket] = encode_offset(new_off, h);
 		/* Write back the modified group. */
-		return tdb_write_convert(tdb, h->group_start,
-					 h->group, sizeof(h->group));
+		ecode = tdb_write_convert(tdb, h->group_start,
+					  h->group, sizeof(h->group));
+		if (ecode != TDB_SUCCESS) {
+			tdb->ecode = ecode;
+			return -1;
+		}
+		return 0;
 	}
 
 	if (h->hash_used > 64)
@@ -577,9 +649,12 @@ int add_to_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off)
 		unsigned int gnum;
 
 		/* Write back the modified group. */
-		if (tdb_write_convert(tdb, h->group_start, h->group,
-				      sizeof(h->group)))
+		ecode = tdb_write_convert(tdb, h->group_start, h->group,
+					  sizeof(h->group));
+		if (ecode != TDB_SUCCESS) {
+			tdb->ecode = ecode;
 			return -1;
+		}
 
 		/* Move hashinfo down a level. */
 		hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
@@ -588,16 +663,25 @@ int add_to_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off)
 		h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
 		h->group_start = hashtable
 			+ gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
-		if (tdb_read_convert(tdb, h->group_start, &h->group,
-				     sizeof(h->group)) == -1)
+		ecode = tdb_read_convert(tdb, h->group_start, &h->group,
+					 sizeof(h->group));
+		if (ecode != TDB_SUCCESS) {
+			tdb->ecode = ecode;
 			return -1;
+		}
 	}
 
 	/* Expanding the group must have made room if it didn't choose this
 	 * bucket. */
-	if (put_into_group(h->group, h->home_bucket, encode_offset(new_off, h)))
-		return tdb_write_convert(tdb, h->group_start,
-					 h->group, sizeof(h->group));
+	if (put_into_group(h->group, h->home_bucket, encode_offset(new_off,h))){
+		ecode = tdb_write_convert(tdb, h->group_start,
+					  h->group, sizeof(h->group));
+		if (ecode != TDB_SUCCESS) {
+			tdb->ecode = ecode;
+			return -1;
+		}
+		return 0;
+	}
 
 	/* This can happen if all hashes in group (and us) dropped into same
 	 * group in subhash. */
@@ -684,20 +768,26 @@ int next_in_hash(struct tdb_context *tdb,
 {
 	const unsigned group_bits = TDB_TOPLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS;
 	tdb_off_t hl_start, hl_range, off;
+	enum TDB_ERROR ecode;
 
 	while (tinfo->toplevel_group < (1 << group_bits)) {
 		hl_start = (tdb_off_t)tinfo->toplevel_group
 			<< (64 - group_bits);
 		hl_range = 1ULL << group_bits;
-		if (tdb_lock_hashes(tdb, hl_start, hl_range, F_RDLCK,
-				    TDB_LOCK_WAIT) != 0)
+		ecode = tdb_lock_hashes(tdb, hl_start, hl_range, F_RDLCK,
+					TDB_LOCK_WAIT);
+		if (ecode != TDB_SUCCESS) {
+			tdb->ecode = ecode;
 			return -1;
+		}
 
 		off = iterate_hash(tdb, tinfo);
 		if (off) {
 			struct tdb_used_record rec;
 
-			if (tdb_read_convert(tdb, off, &rec, sizeof(rec))) {
+			ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
+			if (ecode != TDB_SUCCESS) {
+				tdb->ecode = ecode;
 				tdb_unlock_hashes(tdb, hl_start, hl_range,
 						  F_RDLCK);
 				return -1;
@@ -726,7 +816,11 @@ int next_in_hash(struct tdb_context *tdb,
 					  kbuf->dsize);
 			}
 			tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK);
-			return kbuf->dptr ? 1 : -1;
+			if (TDB_PTR_IS_ERR(kbuf->dptr)) {
+				tdb->ecode = TDB_PTR_ERR(kbuf->dptr);
+				return -1;
+			}
+			return 1;
 		}
 
 		tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK);
@@ -760,7 +854,7 @@ static int chainlock(struct tdb_context *tdb, const TDB_DATA *key,
 		     int ltype, enum tdb_lock_flags waitflag,
 		     const char *func)
 {
-	int ret;
+	enum TDB_ERROR ecode;
 	uint64_t h = tdb_hash(tdb, key->dptr, key->dsize);
 	tdb_off_t lockstart, locksize;
 	unsigned int group, gbits;
@@ -770,9 +864,13 @@ static int chainlock(struct tdb_context *tdb, const TDB_DATA *key,
 
 	lockstart = hlock_range(group, &locksize);
 
-	ret = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag);
+	ecode = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag);
 	tdb_trace_1rec(tdb, func, *key);
-	return ret;
+	if (ecode != TDB_SUCCESS) {
+		tdb->ecode = ecode;
+		return -1;
+	}
+	return 0;
 }
 
 /* lock/unlock one hash chain. This is meant to be used to reduce
@@ -787,6 +885,7 @@ int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
 	uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
 	tdb_off_t lockstart, locksize;
 	unsigned int group, gbits;
+	enum TDB_ERROR ecode;
 
 	gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
 	group = bits_from(h, 64 - gbits, gbits);
@@ -794,5 +893,10 @@ int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
 	lockstart = hlock_range(group, &locksize);
 
 	tdb_trace_1rec(tdb, "tdb_chainunlock", key);
-	return tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK);
+	ecode = tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK);
+	if (ecode != TDB_SUCCESS) {
+		tdb->ecode = ecode;
+		return -1;
+	}
+	return 0;
 }
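
The conversion above is mechanical: internal helpers such as tdb_read_convert(), tdb_write_off(), tdb_lock_hashes() and tdb_access_commit() now return enum TDB_ERROR, while pointer-returning helpers such as tdb_access_read() encode the error in the returned pointer, recovered with TDB_PTR_IS_ERR()/TDB_PTR_ERR(). In every case the caller stashes the code in tdb->ecode before falling back to its old result convention (-1, 0, false or TDB_OFF_ERR). A minimal self-contained sketch of the idiom; the types and do_io() below are simplified stand-ins for illustration, not the real tdb2 definitions:

	/* Simplified stand-ins for the tdb2 types used in this patch. */
	enum TDB_ERROR { TDB_SUCCESS = 0, TDB_ERR_IO };

	struct tdb_context { enum TDB_ERROR ecode; };

	/* Hypothetical helper in the role of tdb_read_convert() and
	 * friends, returning enum TDB_ERROR instead of the old 0/-1. */
	static enum TDB_ERROR do_io(struct tdb_context *tdb)
	{
		(void)tdb;
		return TDB_ERR_IO;	/* pretend the I/O failed */
	}

	/* Callers keep their legacy int result, but record the precise
	 * error on the context first, as each hunk above does. */
	int caller(struct tdb_context *tdb)
	{
		enum TDB_ERROR ecode;

		ecode = do_io(tdb);
		if (ecode != TDB_SUCCESS) {
			tdb->ecode = ecode;
			return -1;
		}
		return 0;
	}

Keeping the legacy return values at the boundary (tdb_chainunlock() still returns int) confines the signature churn to the leaf I/O and locking helpers while preserving the exact failure reason on the context for later inspection.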