off = tdb_read_off(tdb, off + offsetof(struct tdb_chain, next));
if (TDB_OFF_IS_ERR(off)) {
- return off;
+ return TDB_OFF_TO_ERR(off);
}
if (off == 0)
return TDB_SUCCESS;
h = bucket_off(ftable_off, i);
for (off = tdb_read_off(tdb, h); off; off = f.next) {
if (TDB_OFF_IS_ERR(off)) {
- return off;
+ return TDB_OFF_TO_ERR(off);
}
if (!first) {
off &= TDB_OFF_MASK;
char c;
ecode = tdb->tdb2.io->tread(tdb, off, &c, 1);
if (ecode != TDB_SUCCESS) {
- return ecode;
+ return TDB_ERR_TO_OFF(ecode);
}
if (c != 0 && c != 0x43)
break;
} else {
len = dead_space(tdb, off);
if (TDB_OFF_IS_ERR(len)) {
- return len;
+ return TDB_OFF_TO_ERR(len);
}
if (len < sizeof(rec.r)) {
return tdb_logerr(tdb, TDB_ERR_CORRUPT,
for (ft = first_ftable(tdb); ft; ft = next_ftable(tdb, ft)) {
if (TDB_OFF_IS_ERR(ft)) {
- ecode = ft;
+ ecode = TDB_OFF_TO_ERR(ft);
goto out;
}
ecode = check_free_table(tdb, ft, num_ftables, fr, num_free,
while (off) {
if (TDB_OFF_IS_ERR(off)) {
- return off;
+ return TDB_OFF_TO_ERR(off);
}
rnd = random();
/* Get prev->next */
prev_next = tdb_read_off(tdb, off);
if (TDB_OFF_IS_ERR(prev_next))
- return prev_next;
+ return TDB_OFF_TO_ERR(prev_next);
/* If prev->next == 0, we were head: update bucket to point to next. */
if (prev_next == 0) {
/* We must preserve upper bits. */
head = tdb_read_off(tdb, b_off);
if (TDB_OFF_IS_ERR(head))
- return head;
+ return TDB_OFF_TO_ERR(head);
if ((head & TDB_OFF_MASK) != r_off) {
return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
if (r->next == 0) {
head = tdb_read_off(tdb, b_off);
if (TDB_OFF_IS_ERR(head))
- return head;
+ return TDB_OFF_TO_ERR(head);
head &= TDB_OFF_MASK;
off = head + offsetof(struct tdb_free_record, magic_and_prev);
} else {
head = tdb_read_off(tdb, b_off);
if (TDB_OFF_IS_ERR(head))
- return head;
+ return TDB_OFF_TO_ERR(head);
/* We only need to set ftable_and_len; rest is set in enqueue_in_free */
new.ftable_and_len = ((uint64_t)tdb->tdb2.ftable << (64 - TDB_OFF_UPPER_STEAL))
nb_off = ftable_offset(tdb, ftable);
if (TDB_OFF_IS_ERR(nb_off)) {
tdb_access_release(tdb, r);
- ecode = nb_off;
+ ecode = TDB_OFF_TO_ERR(nb_off);
goto err;
}
nb_off = bucket_off(nb_off, bucket);
/* Did we just mess up a record you were hoping to use? */
if (end == *protect) {
tdb->stats.alloc_coalesce_iterate_clash++;
- *protect = TDB_ERR_NOEXIST;
+ *protect = TDB_ERR_TO_OFF(TDB_ERR_NOEXIST);
}
ecode = remove_from_list(tdb, nb_off, end, &rec);
/* Before we expand, check this isn't one you wanted protected? */
if (off == *protect) {
- *protect = TDB_ERR_EXISTS;
+ *protect = TDB_ERR_TO_OFF(TDB_ERR_EXISTS);
tdb->stats.alloc_coalesce_iterate_clash++;
}
if (ecode != TDB_SUCCESS) {
/* Need to drop lock. Can't rely on anything stable. */
tdb->stats.alloc_coalesce_lockfail++;
- *protect = TDB_ERR_CORRUPT;
+ *protect = TDB_ERR_TO_OFF(TDB_ERR_CORRUPT);
/* We have to drop this to avoid deadlocks, so make sure record
* doesn't get coalesced by someone else! */
ecode = add_free_record(tdb, off, end - off, TDB_LOCK_WAIT,
false);
if (ecode != TDB_SUCCESS) {
- return ecode;
+ return TDB_ERR_TO_OFF(ecode);
}
} else if (TDB_OFF_IS_ERR(*protect)) {
/* For simplicity, we always drop lock if they can't continue */
err:
/* To unify error paths, we *always* unlock bucket on error. */
tdb_unlock_free_bucket(tdb, b_off);
- return ecode;
+ return TDB_ERR_TO_OFF(ecode);
}
/* List is locked: we unlock it. */
off = tdb_read_off(tdb, b_off);
if (TDB_OFF_IS_ERR(off)) {
- ecode = off;
+ ecode = TDB_OFF_TO_ERR(off);
goto unlock_err;
}
/* A little bit of paranoia: counter should be 0. */
coal = coalesce(tdb, off, b_off, frec_len(&rec), &next);
if (TDB_OFF_IS_ERR(coal)) {
/* This has already unlocked on error. */
- return coal;
+ return TDB_OFF_TO_ERR(coal);
}
if (TDB_OFF_IS_ERR(next)) {
/* Coalescing had to unlock, so stop. */
/* Get the old head. */
oldhoff = tdb_read_off(tdb, b_off);
if (TDB_OFF_IS_ERR(oldhoff)) {
- ecode = oldhoff;
+ ecode = TDB_OFF_TO_ERR(oldhoff);
goto unlock_err;
}
/* Lock this bucket. */
ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
if (ecode != TDB_SUCCESS) {
- return ecode;
+ return TDB_ERR_TO_OFF(ecode);
}
best.ftable_and_len = -1ULL;
* as we go. */
off = tdb_read_off(tdb, b_off);
if (TDB_OFF_IS_ERR(off)) {
- ecode = off;
+ ecode = TDB_OFF_TO_ERR(off);
goto unlock_err;
}
off &= TDB_OFF_MASK;
+ frec_len(&best) - leftover,
leftover, TDB_LOCK_WAIT, false);
if (ecode != TDB_SUCCESS) {
- best_off = ecode;
+ best_off = TDB_ERR_TO_OFF(ecode);
}
}
tdb_unlock_free_bucket(tdb, b_off);
unlock_err:
tdb_unlock_free_bucket(tdb, b_off);
- return ecode;
+ return TDB_ERR_TO_OFF(ecode);
}
/* Get a free block from current free list, or 0 if none, -ve on error. */
ecode = tdb_expand(tdb, adjust_size(keylen, datalen));
if (ecode != TDB_SUCCESS) {
- return ecode;
+ return TDB_ERR_TO_OFF(ecode);
}
}
rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
if (TDB_PTR_IS_ERR(rkey)) {
- return TDB_PTR_ERR(rkey);
+ return (tdb_bool_err)TDB_PTR_ERR(rkey);
}
if (memcmp(rkey, key->dptr, key->dsize) == 0)
ret = true;
off = val & TDB_OFF_MASK;
ecode = tdb_read_convert(tdb, off, rec, sizeof(*rec));
if (ecode != TDB_SUCCESS) {
- return ecode;
+ return (tdb_bool_err)ecode;
}
if ((h->h & ((1 << 11)-1)) != rec_hash(rec)) {
h->group_start = off;
ecode = tdb_read_convert(tdb, off, h->group, sizeof(h->group));
if (ecode != TDB_SUCCESS) {
- return ecode;
+ return TDB_ERR_TO_OFF(ecode);
}
for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
ecode = tdb_read_convert(tdb, recoff, rec,
sizeof(*rec));
if (ecode != TDB_SUCCESS) {
- return ecode;
+ return TDB_ERR_TO_OFF(ecode);
}
- ecode = key_matches(tdb, rec, recoff, &key);
+ ecode = TDB_OFF_TO_ERR(key_matches(tdb, rec, recoff,
+ &key));
if (ecode < 0) {
- return ecode;
+ return TDB_ERR_TO_OFF(ecode);
}
- if (ecode == 1) {
+ if (ecode == (enum TDB_ERROR)1) {
h->home_bucket = h->found_bucket = i;
if (tinfo) {
ecode = tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
TDB_LOCK_WAIT);
if (ecode != TDB_SUCCESS) {
- return ecode;
+ return TDB_ERR_TO_OFF(ecode);
}
hashtable = offsetof(struct tdb_header, hashtable);
berr = match(tdb, h, &key, h->group[h->found_bucket],
rec);
if (berr < 0) {
- ecode = berr;
+ ecode = TDB_OFF_TO_ERR(berr);
goto fail;
}
if (berr) {
fail:
tdb_unlock_hashes(tdb, h->hlock_start, h->hlock_range, ltype);
- return ecode;
+ return TDB_ERR_TO_OFF(ecode);
}
/* I wrote a simple test, expanding a hash to 2GB, for the following
entry = tdb_find_zero_off(tdb, subhash, 1<<TDB_HASH_GROUP_BITS);
if (TDB_OFF_IS_ERR(entry)) {
- return entry;
+ return TDB_OFF_TO_ERR(entry);
}
if (entry == 1 << TDB_HASH_GROUP_BITS) {
next = tdb_read_off(tdb, subhash
+ offsetof(struct tdb_chain, next));
if (TDB_OFF_IS_ERR(next)) {
- return next;
+ return TDB_OFF_TO_ERR(next);
}
if (!next) {
next = alloc(tdb, 0, sizeof(struct tdb_chain), 0,
TDB_CHAIN_MAGIC, false);
if (TDB_OFF_IS_ERR(next))
- return next;
+ return TDB_OFF_TO_ERR(next);
ecode = zero_out(tdb,
next+sizeof(struct tdb_used_record),
sizeof(struct tdb_chain));
subhash = alloc(tdb, 0, subsize, 0, magic, false);
if (TDB_OFF_IS_ERR(subhash)) {
- return subhash;
+ return TDB_OFF_TO_ERR(subhash);
}
ecode = zero_out(tdb, subhash + sizeof(struct tdb_used_record),
struct tdb_used_record rec;
if (TDB_OFF_IS_ERR(off)) {
- ecode = off;
+ ecode = TDB_OFF_TO_ERR(off);
goto fail;
}
val = tdb_access_read(tdb, base + start * sizeof(tdb_off_t),
(end - start) * sizeof(tdb_off_t), false);
if (TDB_PTR_IS_ERR(val)) {
- return TDB_PTR_ERR(val);
+ return TDB_ERR_TO_OFF(TDB_PTR_ERR(val));
}
for (i = 0; i < (end - start); i++) {
/* Zero vs non-zero is the same unconverted: minor optimization. */
val = tdb_access_read(tdb, off, num * sizeof(tdb_off_t), false);
if (TDB_PTR_IS_ERR(val)) {
- return TDB_PTR_ERR(val);
+ return TDB_ERR_TO_OFF(TDB_PTR_ERR(val));
}
for (i = 0; i < num; i++) {
tdb_off_t *p = tdb->tdb2.io->direct(tdb, off, sizeof(*p),
false);
if (TDB_PTR_IS_ERR(p)) {
- return TDB_PTR_ERR(p);
+ return TDB_ERR_TO_OFF(TDB_PTR_ERR(p));
}
if (p)
return *p;
ecode = tdb_read_convert(tdb, off, &ret, sizeof(ret));
if (ecode != TDB_SUCCESS) {
- return ecode;
+ return TDB_ERR_TO_OFF(ecode);
}
return ret;
}
tdb_brunlock(tdb, ltype, offset, 1);
if (berr < 0)
- return berr;
+ return TDB_OFF_TO_ERR(berr);
ecode = tdb_lock_and_recover(tdb);
if (ecode == TDB_SUCCESS) {
ecode = tdb_brlock(tdb, ltype, offset, 1,
tdb_allrecord_unlock(tdb, ltype);
if (berr < 0)
- return berr;
+ return TDB_OFF_TO_ERR(berr);
ecode = tdb_lock_and_recover(tdb);
if (ecode != TDB_SUCCESS) {
return ecode;
berr = tdb_needs_recovery(tdb);
if (unlikely(berr != false)) {
if (berr < 0) {
- ecode = berr;
+ ecode = TDB_OFF_TO_ERR(berr);
goto fail;
}
ecode = tdb_lock_and_recover(tdb);
fail:
/* Map ecode to some logical errno. */
- switch (ecode) {
- case TDB_ERR_CORRUPT:
- case TDB_ERR_IO:
+ switch (TDB_ERR_TO_OFF(ecode)) {
+ case TDB_ERR_TO_OFF(TDB_ERR_CORRUPT):
+ case TDB_ERR_TO_OFF(TDB_ERR_IO):
saved_errno = EIO;
break;
- case TDB_ERR_LOCK:
+ case TDB_ERR_TO_OFF(TDB_ERR_LOCK):
saved_errno = EWOULDBLOCK;
break;
- case TDB_ERR_OOM:
+ case TDB_ERR_TO_OFF(TDB_ERR_OOM):
saved_errno = ENOMEM;
break;
- case TDB_ERR_EINVAL:
+ case TDB_ERR_TO_OFF(TDB_ERR_EINVAL):
saved_errno = EINVAL;
break;
default:
#define TDB_RECOVERY_MAGIC (0xf53bc0e7ad124589ULL)
#define TDB_RECOVERY_INVALID_MAGIC (0x0ULL)

/* Error codes are packed into the top of the tdb_off_t range: any offset
 * >= (tdb_off_t)TDB_ERR_LAST is really an enum TDB_ERROR value (presumably
 * negative — confirm against enum TDB_ERROR).  The casts go through (long)
 * so the enum <-> offset conversion round-trips on LP64 targets.
 * NOTE(review): the argument of TDB_OFF_IS_ERR is parenthesized so that
 * expression arguments (e.g. a ternary) parse correctly. */
#define TDB_OFF_IS_ERR(off) unlikely((off) >= (tdb_off_t)(long)TDB_ERR_LAST)
#define TDB_OFF_TO_ERR(off) ((enum TDB_ERROR)(long)(off))
#define TDB_ERR_TO_OFF(ecode) ((tdb_off_t)(long)(ecode))
/* Packing errors into pointers and vice versa. */
#define TDB_PTR_IS_ERR(ptr) \
h = tdb_access_read(tdb, hash_off, sizeof(*h) << bits, true);
if (TDB_PTR_IS_ERR(h)) {
- return TDB_PTR_ERR(h);
+ return TDB_ERR_TO_OFF(TDB_PTR_ERR(h));
}
for (i = 0; i < (1 << bits); i++)
count += (h[i] != 0);
off + sizeof(p->u),
TDB_SUBLEVEL_HASH_BITS);
if (TDB_OFF_IS_ERR(count)) {
- return count;
+ return TDB_OFF_TO_ERR(count);
}
tally_add(hashes, count);
tally_add(extra, rec_extra_padding(&p->u));
} else {
len = dead_space(tdb, off);
if (TDB_OFF_IS_ERR(len)) {
- return len;
+ return TDB_OFF_TO_ERR(len);
}
}
tdb_access_release(tdb, p);
new_off = alloc(tdb, key.dsize, dbuf.dsize, h->h, TDB_USED_MAGIC,
growing);
if (TDB_OFF_IS_ERR(new_off)) {
- return new_off;
+ return TDB_OFF_TO_ERR(new_off);
}
/* We didn't like the existing one: remove it. */
off = find_and_lock(tdb, key, F_WRLCK, &h, &rec, NULL);
if (TDB_OFF_IS_ERR(off)) {
- return tdb->last_error = off;
+ return tdb->last_error = TDB_OFF_TO_ERR(off);
}
/* Now we have lock on this hash bucket. */
off = find_and_lock(tdb, key, F_WRLCK, &h, &rec, NULL);
if (TDB_OFF_IS_ERR(off)) {
- return tdb->last_error = off;
+ return tdb->last_error = TDB_OFF_TO_ERR(off);
}
if (off) {
off = find_and_lock(tdb, key, F_RDLCK, &h, &rec, NULL);
if (TDB_OFF_IS_ERR(off)) {
- return tdb->last_error = off;
+ return tdb->last_error = TDB_OFF_TO_ERR(off);
}
if (!off) {
off = find_and_lock(tdb, key, F_RDLCK, &h, &rec, NULL);
if (TDB_OFF_IS_ERR(off)) {
- tdb->last_error = off;
+ tdb->last_error = TDB_OFF_TO_ERR(off);
return false;
}
tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_RDLCK);
off = find_and_lock(tdb, key, F_WRLCK, &h, &rec, NULL);
if (TDB_OFF_IS_ERR(off)) {
- return tdb->last_error = off;
+ return tdb->last_error = TDB_OFF_TO_ERR(off);
}
if (!off) {
const char *tdb_errorstr(enum TDB_ERROR ecode)
{
/* Gcc warns if you miss a case in the switch, so use that. */
- switch (ecode) {
- case TDB_SUCCESS: return "Success";
- case TDB_ERR_CORRUPT: return "Corrupt database";
- case TDB_ERR_IO: return "IO Error";
- case TDB_ERR_LOCK: return "Locking error";
- case TDB_ERR_OOM: return "Out of memory";
- case TDB_ERR_EXISTS: return "Record exists";
- case TDB_ERR_EINVAL: return "Invalid parameter";
- case TDB_ERR_NOEXIST: return "Record does not exist";
- case TDB_ERR_RDONLY: return "write not permitted";
+ switch (TDB_ERR_TO_OFF(ecode)) {
+ case TDB_ERR_TO_OFF(TDB_SUCCESS): return "Success";
+ case TDB_ERR_TO_OFF(TDB_ERR_CORRUPT): return "Corrupt database";
+ case TDB_ERR_TO_OFF(TDB_ERR_IO): return "IO Error";
+ case TDB_ERR_TO_OFF(TDB_ERR_LOCK): return "Locking error";
+ case TDB_ERR_TO_OFF(TDB_ERR_OOM): return "Out of memory";
+ case TDB_ERR_TO_OFF(TDB_ERR_EXISTS): return "Record exists";
+ case TDB_ERR_TO_OFF(TDB_ERR_EINVAL): return "Invalid parameter";
+ case TDB_ERR_TO_OFF(TDB_ERR_NOEXIST): return "Record does not exist";
+ case TDB_ERR_TO_OFF(TDB_ERR_RDONLY): return "write not permitted";
}
return "Invalid error code";
}
off = find_and_lock(tdb, key, F_RDLCK, &h, &rec, NULL);
if (TDB_OFF_IS_ERR(off)) {
- return tdb->last_error = off;
+ return tdb->last_error = TDB_OFF_TO_ERR(off);
}
if (!off) {
val = tdb1_get_seqnum(tdb);
if (tdb->last_error != TDB_SUCCESS)
- return tdb->last_error;
+ return TDB_ERR_TO_OFF(tdb->last_error);
else
return val;
}
off = tdb_read_off(tdb, offsetof(struct tdb_header, seqnum));
if (TDB_OFF_IS_ERR(off))
- tdb->last_error = off;
+ tdb->last_error = TDB_OFF_TO_ERR(off);
else
tdb->last_error = TDB_SUCCESS;
return off;
/* find the recovery area */
if (tdb1_ofs_read(tdb, TDB1_RECOVERY_HEAD, &recovery_head) == -1) {
- return tdb->last_error;
+ return TDB_ERR_TO_OFF(tdb->last_error);
}
if (recovery_head == 0) {
/* read the recovery record */
if (tdb->tdb1.io->tdb1_read(tdb, recovery_head, &rec,
sizeof(rec), TDB1_DOCONV()) == -1) {
- return tdb->last_error;
+ return TDB_ERR_TO_OFF(tdb->last_error);
}
return (rec.magic == TDB1_RECOVERY_MAGIC);
*recovery_offset = tdb_read_off(tdb,
offsetof(struct tdb_header, recovery));
if (TDB_OFF_IS_ERR(*recovery_offset)) {
- return *recovery_offset;
+ return TDB_OFF_TO_ERR(*recovery_offset);
}
if (*recovery_offset == 0) {
tdb->stats.transaction_expand_file++;
ecode = methods->expand_file(tdb, addition);
if (ecode != TDB_SUCCESS) {
- return tdb_logerr(tdb, ecode, TDB_LOG_ERROR,
- "tdb_recovery_allocate:"
- " failed to create recovery area");
+ tdb_logerr(tdb, ecode, TDB_LOG_ERROR,
+ "tdb_recovery_allocate:"
+ " failed to create recovery area");
+ return TDB_ERR_TO_OFF(ecode);
}
/* we have to reset the old map size so that we don't try to
ecode = methods->twrite(tdb, offsetof(struct tdb_header, recovery),
&recovery_off, sizeof(tdb_off_t));
if (ecode != TDB_SUCCESS) {
- return tdb_logerr(tdb, ecode, TDB_LOG_ERROR,
- "tdb_recovery_allocate:"
- " failed to write recovery head");
+ tdb_logerr(tdb, ecode, TDB_LOG_ERROR,
+ "tdb_recovery_allocate:"
+ " failed to write recovery head");
+ return TDB_ERR_TO_OFF(ecode);
}
transaction_write_existing(tdb, offsetof(struct tdb_header, recovery),
&recovery_off,
recovery);
if (TDB_OFF_IS_ERR(recovery_off)) {
free(recovery);
- return recovery_off;
+ return TDB_OFF_TO_ERR(recovery_off);
}
}
/* find the recovery area */
recovery_head = tdb_read_off(tdb, offsetof(struct tdb_header,recovery));
if (TDB_OFF_IS_ERR(recovery_head)) {
- return tdb_logerr(tdb, recovery_head, TDB_LOG_ERROR,
+ ecode = TDB_OFF_TO_ERR(recovery_head);
+ return tdb_logerr(tdb, ecode, TDB_LOG_ERROR,
"tdb_transaction_recover:"
" failed to read recovery head");
}
/* read the recovery record */
ecode = tdb_read_convert(tdb, recovery_head, &rec, sizeof(rec));
if (ecode != TDB_SUCCESS) {
- return ecode;
+ return TDB_ERR_TO_OFF(ecode);
}
return (rec.magic == TDB_RECOVERY_MAGIC);
if (tdb->flags & TDB_VERSION1) {
count = tdb1_traverse(tdb, fn, p);
if (count == -1)
- return tdb->last_error;
+ return TDB_ERR_TO_OFF(tdb->last_error);
return count;
}
}
if (ecode != TDB_ERR_NOEXIST) {
- return tdb->last_error = ecode;
+ return TDB_ERR_TO_OFF(tdb->last_error = ecode);
}
tdb->last_error = TDB_SUCCESS;
return count;
tinfo.prev = find_and_lock(tdb, *key, F_RDLCK, &h, &rec, &tinfo);
free(key->dptr);
if (TDB_OFF_IS_ERR(tinfo.prev)) {
- return tdb->last_error = tinfo.prev;
+ return tdb->last_error = TDB_OFF_TO_ERR(tinfo.prev);
}
tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_RDLCK);
/* FIXME: Be smarter. */
count = tdb_traverse(tdb, wipe_one, &ecode);
if (count < 0)
- ecode = count;
+ ecode = TDB_OFF_TO_ERR(count);
tdb_allrecord_unlock(tdb, F_WRLCK);
return tdb->last_error = ecode;
}