tdb->ftable = 0;
while (off) {
- if (off == TDB_OFF_ERR)
+ if (TDB_OFF_IS_ERR(off)) {
+ tdb->ecode = off;
return -1;
+ }
rnd = random();
if (rnd >= max) {
+ bucket * sizeof(tdb_off_t);
}
-/* Returns free_buckets + 1, or list number to search. */
+/* Returns free_buckets + 1, or list number to search, or -ve error. */
static tdb_off_t find_free_head(struct tdb_context *tdb,
tdb_off_t ftable_off,
tdb_off_t bucket)
const struct tdb_free_record *r)
{
tdb_off_t off;
+ enum TDB_ERROR ecode;
/* Front of list? */
if (frec_prev(r) == 0) {
#ifdef CCAN_TDB2_DEBUG
if (tdb_read_off(tdb, off) != r_off) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"remove_from_list: %llu bad prev in list %llu",
(long long)r_off, (long long)b_off);
return -1;
#endif
/* r->prev->next = r->next */
- if (tdb_write_off(tdb, off, r->next)) {
+ ecode = tdb_write_off(tdb, off, r->next);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return -1;
}
#ifdef CCAN_TDB2_DEBUG
	if ((tdb_read_off(tdb, off) & TDB_OFF_MASK) != r_off) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"remove_from_list: %llu bad list %llu",
(long long)r_off, (long long)b_off);
return -1;
}
#endif
- if (tdb_write_off(tdb, off, r->magic_and_prev)) {
+ ecode = tdb_write_off(tdb, off, r->magic_and_prev);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return -1;
}
}
tdb_len_t len)
{
struct tdb_free_record new;
+ enum TDB_ERROR ecode;
uint64_t magic = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL));
/* We only need to set ftable_and_len; rest is set in enqueue_in_free */
/* new->next = head. */
new.next = tdb_read_off(tdb, b_off);
- if (new.next == TDB_OFF_ERR)
+ if (TDB_OFF_IS_ERR(new.next)) {
+ tdb->ecode = new.next;
return -1;
+ }
if (new.next) {
#ifdef CCAN_TDB2_DEBUG
new.next + offsetof(struct tdb_free_record,
magic_and_prev))
!= magic) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"enqueue_in_free: %llu bad head"
" prev %llu",
(long long)new.next, (long long)b_off);
}
#endif
/* next->prev = new. */
- if (tdb_write_off(tdb, new.next
- + offsetof(struct tdb_free_record,
- magic_and_prev),
- off | magic) != 0)
+ ecode = tdb_write_off(tdb, new.next
+ + offsetof(struct tdb_free_record,
+ magic_and_prev),
+ off | magic);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return -1;
+ }
}
/* head = new */
- if (tdb_write_off(tdb, b_off, off) != 0)
+ ecode = tdb_write_off(tdb, b_off, off);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return -1;
+ }
- return tdb_write_convert(tdb, off, &new, sizeof(new));
+ ecode = tdb_write_convert(tdb, off, &new, sizeof(new));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
+ return -1;
+ }
+ return 0;
}
/* List need not be locked. */
tdb_off_t b_off;
tdb_len_t len;
int ret;
+ enum TDB_ERROR ecode;
assert(len_with_header >= sizeof(struct tdb_free_record));
len = len_with_header - sizeof(struct tdb_used_record);
b_off = bucket_off(tdb->ftable_off, size_to_bucket(len));
- if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) != 0)
+ ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return -1;
+ }
ret = enqueue_in_free(tdb, b_off, off, len);
tdb_unlock_free_bucket(tdb, b_off);
return tdb->ftable_off;
off = first_ftable(tdb);
- for (i = 0; i < ftable; i++)
+ for (i = 0; i < ftable; i++) {
+ if (TDB_OFF_IS_ERR(off)) {
+ tdb->ecode = off;
+ break;
+ }
off = next_ftable(tdb, off);
+ }
return off;
}
{
tdb_off_t end;
struct tdb_free_record rec;
+ enum TDB_ERROR ecode;
add_stat(tdb, alloc_coalesce_tried, 1);
end = off + sizeof(struct tdb_used_record) + data_len;
unsigned ftable, bucket;
r = tdb_access_read(tdb, end, sizeof(*r), true);
- if (!r)
+ if (TDB_PTR_IS_ERR(r)) {
+ tdb->ecode = TDB_PTR_ERR(r);
goto err;
+ }
if (frec_magic(r) != TDB_FREE_MAGIC
|| frec_ftable(r) == TDB_FTABLE_NONE) {
tdb_access_release(tdb, r);
/* We may be violating lock order here, so best effort. */
- if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT) == -1) {
+ if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT)
+ != TDB_SUCCESS) {
add_stat(tdb, alloc_coalesce_lockfail, 1);
break;
}
/* Now we have lock, re-check. */
- if (tdb_read_convert(tdb, end, &rec, sizeof(rec))) {
+ ecode = tdb_read_convert(tdb, end, &rec, sizeof(rec));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
tdb_unlock_free_bucket(tdb, nb_off);
goto err;
}
return 0;
/* OK, expand initial record */
- if (tdb_read_convert(tdb, off, &rec, sizeof(rec)))
+ ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
goto err;
+ }
if (frec_len(&rec) != data_len) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"coalesce: expected data len %zu not %zu",
(size_t)data_len, (size_t)frec_len(&rec));
goto err;
* doesn't get coalesced by someone else! */
rec.ftable_and_len = (TDB_FTABLE_NONE << (64 - TDB_OFF_UPPER_STEAL))
| (end - off - sizeof(struct tdb_used_record));
- if (tdb_write_off(tdb, off + offsetof(struct tdb_free_record,
- ftable_and_len),
- rec.ftable_and_len) != 0)
+ ecode = tdb_write_off(tdb, off + offsetof(struct tdb_free_record,
+ ftable_and_len),
+ rec.ftable_and_len);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
goto err;
+ }
add_stat(tdb, alloc_coalesce_succeeded, 1);
tdb_unlock_free_bucket(tdb, b_off);
struct tdb_free_record best = { 0 };
double multiplier;
size_t size = adjust_size(keylen, datalen);
+ enum TDB_ERROR ecode;
add_stat(tdb, allocs, 1);
again:
/* FIXME: Try non-blocking wait first, to measure contention. */
/* Lock this bucket. */
- if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == -1) {
+ ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return TDB_OFF_ERR;
}
/* Walk the list to see if any are large enough, getting less fussy
* as we go. */
off = tdb_read_off(tdb, b_off);
- if (unlikely(off == TDB_OFF_ERR))
+ if (TDB_OFF_IS_ERR(off)) {
+ tdb->ecode = off;
goto unlock_err;
+ }
while (off) {
const struct tdb_free_record *r;
tdb_off_t next;
r = tdb_access_read(tdb, off, sizeof(*r), true);
- if (!r)
+ if (TDB_PTR_IS_ERR(r)) {
+ tdb->ecode = TDB_PTR_ERR(r);
goto unlock_err;
+ }
if (frec_magic(r) != TDB_FREE_MAGIC) {
tdb_access_release(tdb, r);
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"lock_and_alloc: %llu non-free 0x%llx",
(long long)off, (long long)r->magic_and_prev);
goto unlock_err;
frec_len(&best) - leftover, hashlow) != 0)
goto unlock_err;
- if (tdb_write_convert(tdb, best_off, &rec, sizeof(rec)) != 0)
+ ecode = tdb_write_convert(tdb, best_off, &rec, sizeof(rec));
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
goto unlock_err;
+ }
/* Bucket of leftover will be <= current bucket, so nested
* locking is allowed. */
unsigned magic, unsigned hashlow)
{
tdb_off_t off, ftable_off;
- unsigned start_b, b, ftable;
+ tdb_off_t start_b, b, ftable;
bool wrapped = false;
/* If they are growing, add 50% to get to higher bucket. */
/* Didn't work. Try next bucket. */
}
+ if (TDB_OFF_IS_ERR(b)) {
+ tdb->ecode = b;
+ return 0;
+ }
+
/* Hmm, try next table. */
ftable_off = next_ftable(tdb, ftable_off);
+ if (TDB_OFF_IS_ERR(ftable_off)) {
+ tdb->ecode = ftable_off;
+ return 0;
+ }
ftable++;
if (ftable_off == 0) {
wrapped = true;
ftable_off = first_ftable(tdb);
+ if (TDB_OFF_IS_ERR(ftable_off)) {
+ tdb->ecode = ftable_off;
+ return 0;
+ }
ftable = 0;
}
}
if (rec_key_length(rec) != keylen
|| rec_data_length(rec) != datalen
|| rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"Could not encode k=%llu,d=%llu,a=%llu",
(long long)keylen, (long long)datalen,
(long long)actuallen);
{
uint64_t old_size;
tdb_len_t wanted;
+ enum TDB_ERROR ecode;
/* We need room for the record header too. */
wanted = sizeof(struct tdb_used_record) + size;
/* Need to hold a hash lock to expand DB: transactions rely on it. */
if (!(tdb->flags & TDB_NOLOCK)
&& !tdb->allrecord_lock.count && !tdb_has_hash_locks(tdb)) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_expand: must hold lock during expand");
return -1;
}
wanted = adjust_size(0, wanted);
/* Only one person can expand file at a time. */
- if (tdb_lock_expand(tdb, F_WRLCK) != 0)
+ ecode = tdb_lock_expand(tdb, F_WRLCK);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
return -1;
+ }
/* Someone else may have expanded the file, so retry. */
old_size = tdb->map_size;
return 0;
}
- if (tdb->methods->expand_file(tdb, wanted) == -1) {
+ ecode = tdb->methods->expand_file(tdb, wanted);
+ if (ecode != TDB_SUCCESS) {
+ tdb->ecode = ecode;
tdb_unlock_expand(tdb, F_WRLCK);
return -1;
}