uint64_t klen, hash;
r = tdb_access_read(tdb, off, sizeof(*r), true);
- if (!r)
+ if (TDB_PTR_IS_ERR(r)) {
+ tdb->ecode = TDB_PTR_ERR(r);
/* FIXME */
return 0;
+ }
klen = rec_key_length(r);
tdb_access_release(tdb, r);
key = tdb_access_read(tdb, off + sizeof(*r), klen, false);
- if (!key)
+ if (TDB_PTR_IS_ERR(key)) {
+ tdb->ecode = TDB_PTR_ERR(key);
return 0;
+ }
hash = tdb_hash(tdb, key, klen);
tdb_access_release(tdb, key);
}
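
Note on the pattern above: TDB_PTR_IS_ERR()/TDB_PTR_ERR() let tdb_access_read() report which error occurred instead of a bare NULL, by smuggling an enum TDB_ERROR value through the pointer itself, in the style of the Linux kernel's IS_ERR()/PTR_ERR(). A minimal self-contained sketch of that convention; the real macros live in tdb2's private headers, and TDB_ERR_PTR plus the error values below are illustrative stand-ins:

#include <stdio.h>

/* Illustrative stand-ins; tdb2 defines its own enum TDB_ERROR values. */
enum TDB_ERROR { TDB_SUCCESS = 0, TDB_ERR_IO = -1, TDB_ERR_OOM = -2 };

/* Small negative codes cast to pointers land at the very top of the
 * address space, where no valid allocation or mmap'd record can live. */
#define TDB_ERR_PTR(err)  ((void *)(long)(err))
#define TDB_PTR_ERR(p)    ((enum TDB_ERROR)(long)(p))
#define TDB_PTR_IS_ERR(p) ((unsigned long)(p) > (unsigned long)-100L)

int main(void)
{
	const void *r = TDB_ERR_PTR(TDB_ERR_IO);

	if (TDB_PTR_IS_ERR(r))
		printf("read failed: error %d\n", TDB_PTR_ERR(r));
	return 0;
}
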
rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
- if (!rkey)
+ if (TDB_PTR_IS_ERR(rkey)) {
+ tdb->ecode = TDB_PTR_ERR(rkey);
return ret;
+ }
if (memcmp(rkey, key->dptr, key->dsize) == 0)
ret = true;
else
struct tdb_used_record *rec)
{
tdb_off_t off;
+ enum TDB_ERROR ecode;
add_stat(tdb, compares, 1);
/* Desired bucket must match. */
}
off = val & TDB_OFF_MASK;
- if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
+ ecode = tdb_read_convert(tdb, off, rec, sizeof(*rec));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return false;
+ }
if ((h->h & ((1 << 11)-1)) != rec_hash(rec)) {
add_stat(tdb, compare_wrong_rechash, 1);
struct traverse_info *tinfo)
{
tdb_off_t off, next;
+ enum TDB_ERROR ecode;
/* In case nothing is free, we set these to zero. */
h->home_bucket = h->found_bucket = 0;
unsigned int i;
h->group_start = off;
- if (tdb_read_convert(tdb, off, h->group, sizeof(h->group)))
+ ecode = tdb_read_convert(tdb, off, h->group, sizeof(h->group));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return TDB_OFF_ERR;
+ }
for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
tdb_off_t recoff;
/* We can insert extra bits via add_to_hash
* empty bucket logic. */
recoff = h->group[i] & TDB_OFF_MASK;
- if (tdb_read_convert(tdb, recoff, rec, sizeof(*rec)))
+ ecode = tdb_read_convert(tdb, recoff, rec,
+ sizeof(*rec));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return TDB_OFF_ERR;
+ }
if (key_matches(tdb, rec, recoff, &key)) {
h->home_bucket = h->found_bucket = i;
{
uint32_t i, group;
tdb_off_t hashtable;
+ enum TDB_ERROR ecode;
h->h = tdb_hash(tdb, key.dptr, key.dsize);
h->hash_used = 0;
h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
h->hlock_start = hlock_range(group, &h->hlock_range);
- if (tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
- TDB_LOCK_WAIT))
+ ecode = tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
+ TDB_LOCK_WAIT);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return TDB_OFF_ERR;
+ }
hashtable = offsetof(struct tdb_header, hashtable);
if (tinfo) {
h->group_start = hashtable
+ group * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
- if (tdb_read_convert(tdb, h->group_start, &h->group,
- sizeof(h->group)) == -1)
+ ecode = tdb_read_convert(tdb, h->group_start, &h->group,
+ sizeof(h->group));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
goto fail;
+ }
/* Pointer to another hash table? Go down... */
if (is_subhash(h->group[h->home_bucket])) {
struct hash_info *h,
tdb_off_t new_off)
{
- return tdb_write_off(tdb, hbucket_off(h->group_start, h->found_bucket),
- encode_offset(new_off, h));
+ enum TDB_ERROR ecode;
+
+ ecode = tdb_write_off(tdb, hbucket_off(h->group_start, h->found_bucket),
+ encode_offset(new_off, h));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
+ return -1;
+ }
+ return 0;
}
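
replace_in_hash() above shows the conversion recipe this patch applies throughout: the internal call now yields an enum TDB_ERROR, which gets stashed in tdb->ecode before the function falls back to the 0/-1 contract its callers still expect. The recipe in isolation, with do_internal_io() as a hypothetical stand-in for tdb_write_off() and friends:

/* Everything here is a stand-in except enum TDB_ERROR, TDB_SUCCESS and
 * the ecode field, which mirror tdb2. */
enum TDB_ERROR { TDB_SUCCESS = 0, TDB_ERR_IO = -1 };
struct tdb_context { enum TDB_ERROR ecode; };

static enum TDB_ERROR do_internal_io(struct tdb_context *tdb)
{
	(void)tdb;
	return TDB_ERR_IO;	/* pretend the underlying write failed */
}

static int legacy_wrapper(struct tdb_context *tdb)
{
	enum TDB_ERROR ecode = do_internal_io(tdb);

	if (ecode != TDB_SUCCESS) {
		tdb->ecode = ecode;	/* record why we failed... */
		return -1;		/* ...but keep the old int contract */
	}
	return 0;
}
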
/* We slot in anywhere that's empty in the chain. */
tdb_off_t new_off)
{
size_t entry = tdb_find_zero_off(tdb, subhash, 1<<TDB_HASH_GROUP_BITS);
+ enum TDB_ERROR ecode;
if (entry == 1 << TDB_HASH_GROUP_BITS) {
tdb_off_t next;
TDB_CHAIN_MAGIC, false);
if (next == TDB_OFF_ERR)
return -1;
- if (zero_out(tdb, next+sizeof(struct tdb_used_record),
- sizeof(struct tdb_chain)))
+ ecode = zero_out(tdb,
+ next+sizeof(struct tdb_used_record),
+ sizeof(struct tdb_chain));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return -1;
- if (tdb_write_off(tdb, subhash
- + offsetof(struct tdb_chain, next),
- next) != 0)
+ }
+ ecode = tdb_write_off(tdb, subhash + offsetof(struct tdb_chain, next),
+ next);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return -1;
+ }
}
return add_to_chain(tdb, next, new_off);
}
- return tdb_write_off(tdb, subhash + entry * sizeof(tdb_off_t),
- new_off);
+ ecode = tdb_write_off(tdb, subhash + entry * sizeof(tdb_off_t),
+ new_off);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
+ return -1;
+ }
+ return 0;
}
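
The shape of add_to_chain() is: tdb_find_zero_off() returns the first empty slot, or the group size when every slot is taken, in which case the chain is extended through struct tdb_chain's next field and the insert recurses into the new link. The same find-or-extend logic on an in-memory chain, all names hypothetical:

#include <stdlib.h>

#define NSLOTS 8			/* stands in for 1 << TDB_HASH_GROUP_BITS */

struct chain {
	unsigned long slot[NSLOTS];	/* 0 marks an empty slot */
	struct chain *next;
};

/* First empty slot, or NSLOTS when full: mirrors tdb_find_zero_off(). */
static size_t find_zero(const unsigned long *s, size_t n)
{
	size_t i;
	for (i = 0; i < n; i++)
		if (!s[i])
			return i;
	return n;
}

static int chain_add(struct chain *c, unsigned long off)
{
	size_t entry = find_zero(c->slot, NSLOTS);

	if (entry == NSLOTS) {
		if (!c->next) {
			c->next = calloc(1, sizeof(*c->next));
			if (!c->next)
				return -1;	/* allocation failed */
		}
		return chain_add(c->next, off);	/* recurse into next link */
	}
	c->slot[entry] = off;
	return 0;
}
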
/* Add into a newly created subhash. */
tdb_off_t off = (val & TDB_OFF_MASK), *group;
struct hash_info h;
unsigned int gnum;
+ enum TDB_ERROR ecode;
h.hash_used = hash_used;
group = tdb_access_write(tdb, h.group_start,
sizeof(*group) << TDB_HASH_GROUP_BITS, true);
- if (!group)
+ if (TDB_PTR_IS_ERR(group)) {
+ tdb->ecode = TDB_PTR_ERR(group);
return -1;
+ }
force_into_group(group, h.home_bucket, encode_offset(off, &h));
- return tdb_access_commit(tdb, group);
+ ecode = tdb_access_commit(tdb, group);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
+ return -1;
+ }
+ return 0;
}
static int expand_group(struct tdb_context *tdb, struct hash_info *h)
size_t subsize;
tdb_off_t subhash;
tdb_off_t vals[1 << TDB_HASH_GROUP_BITS];
+ enum TDB_ERROR ecode;
/* Attach new empty subhash under fullest bucket. */
bucket = fullest_bucket(tdb, h->group, h->home_bucket);
if (subhash == TDB_OFF_ERR)
return -1;
- if (zero_out(tdb, subhash + sizeof(struct tdb_used_record), subsize))
+ ecode = zero_out(tdb, subhash + sizeof(struct tdb_used_record),
+ subsize);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return -1;
+ }
/* Remove any which are destined for bucket or are in wrong place. */
num_vals = 0;
{
unsigned int i, num_movers = 0;
tdb_off_t movers[1 << TDB_HASH_GROUP_BITS];
+ enum TDB_ERROR ecode;
h->group[h->found_bucket] = 0;
for (i = 1; i < (1 << TDB_HASH_GROUP_BITS); i++) {
}
/* Now we write back the hash group */
- return tdb_write_convert(tdb, h->group_start,
- h->group, sizeof(h->group));
+ ecode = tdb_write_convert(tdb, h->group_start,
+ h->group, sizeof(h->group));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
+ return -1;
+ }
+ return 0;
}
int add_to_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off)
{
+ enum TDB_ERROR ecode;
+
/* We hit an empty bucket during search? That's where it goes. */
if (!h->group[h->found_bucket]) {
h->group[h->found_bucket] = encode_offset(new_off, h);
/* Write back the modified group. */
- return tdb_write_convert(tdb, h->group_start,
- h->group, sizeof(h->group));
+ ecode = tdb_write_convert(tdb, h->group_start,
+ h->group, sizeof(h->group));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
+ return -1;
+ }
+ return 0;
}
if (h->hash_used > 64)
unsigned int gnum;
/* Write back the modified group. */
- if (tdb_write_convert(tdb, h->group_start, h->group,
- sizeof(h->group)))
+ ecode = tdb_write_convert(tdb, h->group_start, h->group,
+ sizeof(h->group));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return -1;
+ }
/* Move hashinfo down a level. */
hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
h->group_start = hashtable
+ gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
- if (tdb_read_convert(tdb, h->group_start, &h->group,
- sizeof(h->group)) == -1)
+ ecode = tdb_read_convert(tdb, h->group_start, &h->group,
+ sizeof(h->group));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return -1;
+ }
}
/* Expanding the group must have made room if it didn't choose this
* bucket. */
- if (put_into_group(h->group, h->home_bucket, encode_offset(new_off, h)))
- return tdb_write_convert(tdb, h->group_start,
- h->group, sizeof(h->group));
+ if (put_into_group(h->group, h->home_bucket, encode_offset(new_off, h))) {
+ ecode = tdb_write_convert(tdb, h->group_start,
+ h->group, sizeof(h->group));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
+ return -1;
+ }
+ return 0;
+ }
/* This can happen if all hashes in group (and us) dropped into same
* group in subhash. */
}
/* Return 1 if we find something, 0 if not, -1 on error. */
-int next_in_hash(struct tdb_context *tdb, int ltype,
+int next_in_hash(struct tdb_context *tdb,
struct traverse_info *tinfo,
TDB_DATA *kbuf, size_t *dlen)
{
const unsigned group_bits = TDB_TOPLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS;
tdb_off_t hl_start, hl_range, off;
+ enum TDB_ERROR ecode;
while (tinfo->toplevel_group < (1 << group_bits)) {
hl_start = (tdb_off_t)tinfo->toplevel_group
<< (64 - group_bits);
hl_range = 1ULL << group_bits;
- if (tdb_lock_hashes(tdb, hl_start, hl_range, ltype,
- TDB_LOCK_WAIT) != 0)
+ ecode = tdb_lock_hashes(tdb, hl_start, hl_range, F_RDLCK,
+ TDB_LOCK_WAIT);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return -1;
+ }
off = iterate_hash(tdb, tinfo);
if (off) {
struct tdb_used_record rec;
- if (tdb_read_convert(tdb, off, &rec, sizeof(rec))) {
+ ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
tdb_unlock_hashes(tdb,
- hl_start, hl_range, ltype);
+ hl_start, hl_range, F_RDLCK);
return -1;
}
if (rec_magic(&rec) != TDB_USED_MAGIC) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
- TDB_DEBUG_FATAL,
+ TDB_LOG_ERROR,
"next_in_hash:"
" corrupt record at %llu",
(long long)off);
off + sizeof(rec),
kbuf->dsize);
}
- tdb_unlock_hashes(tdb, hl_start, hl_range, ltype);
- return kbuf->dptr ? 1 : -1;
+ tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK);
+ if (TDB_PTR_IS_ERR(kbuf->dptr)) {
+ tdb->ecode = TDB_PTR_ERR(kbuf->dptr);
+ return -1;
+ }
+ return 1;
}
- tdb_unlock_hashes(tdb, hl_start, hl_range, ltype);
+ tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK);
tinfo->toplevel_group++;
tinfo->levels[0].hashtable
}
/* Return 1 if we find something, 0 if not, -1 on error. */
-int first_in_hash(struct tdb_context *tdb, int ltype,
+int first_in_hash(struct tdb_context *tdb,
struct traverse_info *tinfo,
TDB_DATA *kbuf, size_t *dlen)
{
tinfo->levels[0].entry = 0;
tinfo->levels[0].total_buckets = (1 << TDB_HASH_GROUP_BITS);
- return next_in_hash(tdb, ltype, tinfo, kbuf, dlen);
+ return next_in_hash(tdb, tinfo, kbuf, dlen);
}
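
Since the ltype parameter is gone, traversal now always takes F_RDLCK hash locks, and callers drive first_in_hash()/next_in_hash() as an iterator pair. A sketch of the loop shape; traverse_info and both functions are tdb2 internals from private.h, and freeing kbuf.dptr assumes the caller owns the key copy, as in the existing traverse code:

#include <stdint.h>
#include <stdlib.h>		/* free() */
#include "private.h"		/* tdb2 internals; path is an assumption */

/* Count every record; returns -1 with tdb->ecode set on failure. */
static int64_t walk_all(struct tdb_context *tdb)
{
	struct traverse_info tinfo;
	TDB_DATA kbuf;
	size_t dlen;
	int64_t count = 0;
	int ret;

	for (ret = first_in_hash(tdb, &tinfo, &kbuf, &dlen);
	     ret == 1;
	     ret = next_in_hash(tdb, &tinfo, &kbuf, &dlen)) {
		count++;
		free(kbuf.dptr);	/* assumption: caller owns the copy */
	}
	/* ret == 0 means we walked everything; -1 means an error. */
	return ret == -1 ? -1 : count;
}
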
/* Even if the entry isn't in this hash bucket, you'd have to lock this
int ltype, enum tdb_lock_flags waitflag,
const char *func)
{
- int ret;
+ enum TDB_ERROR ecode;
uint64_t h = tdb_hash(tdb, key->dptr, key->dsize);
tdb_off_t lockstart, locksize;
unsigned int group, gbits;
lockstart = hlock_range(group, &locksize);
- ret = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag);
+ ecode = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag);
tdb_trace_1rec(tdb, func, *key);
- return ret;
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
+ return -1;
+ }
+ return 0;
}
/* lock/unlock one hash chain. This is meant to be used to reduce
uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
tdb_off_t lockstart, locksize;
unsigned int group, gbits;
+ enum TDB_ERROR ecode;
gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
group = bits_from(h, 64 - gbits, gbits);
lockstart = hlock_range(group, &locksize);
tdb_trace_1rec(tdb, "tdb_chainunlock", key);
- return tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK);
+ ecode = tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
+ return -1;
+ }
+ return 0;
}
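
End to end, then, the public chain-lock calls keep their 0/-1 contract while tdb->ecode carries the detail. A hedged usage sketch; the header path and the by-value TDB_DATA signature follow tdb2 as of this patch:

#include <ccan/tdb2/tdb2.h>	/* header path may differ by build */

/* Bracket a read-modify-write of everything on key's hash chain. */
static int update_under_chainlock(struct tdb_context *tdb, TDB_DATA key)
{
	if (tdb_chainlock(tdb, key) != 0)
		return -1;		/* tdb->ecode says why */
	/* ... fetch, modify and store records on this chain ... */
	tdb_chainunlock(tdb, key);
	return 0;
}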