It was never clear to me which log levels should be used for which cases.
I can only usefully distinguish three at the moment:
(1) TDB errors, which render the TDB unreliable.
(2) TDB user errors, caused by API misuse.
(3) TDB notifications of strange behaviour, from which we have recovered.
15 files changed:
hash_test = TDB_HASH_MAGIC;
hash_test = tdb_hash(tdb, &hash_test, sizeof(hash_test));
if (hdr.hash_test != hash_test) {
hash_test = TDB_HASH_MAGIC;
hash_test = tdb_hash(tdb, &hash_test, sizeof(hash_test));
if (hdr.hash_test != hash_test) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"check: hash test %llu should be %llu",
(long long)hdr.hash_test,
(long long)hash_test);
"check: hash test %llu should be %llu",
(long long)hdr.hash_test,
(long long)hash_test);
}
if (strcmp(hdr.magic_food, TDB_MAGIC_FOOD) != 0) {
}
if (strcmp(hdr.magic_food, TDB_MAGIC_FOOD) != 0) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"check: bad magic '%.*s'",
(unsigned)sizeof(hdr.magic_food), hdr.magic_food);
return false;
"check: bad magic '%.*s'",
(unsigned)sizeof(hdr.magic_food), hdr.magic_food);
return false;
*recovery = hdr.recovery;
if (*recovery) {
if (*recovery < sizeof(hdr) || *recovery > tdb->map_size) {
*recovery = hdr.recovery;
if (*recovery) {
if (*recovery < sizeof(hdr) || *recovery > tdb->map_size) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: invalid recovery offset %zu",
(size_t)*recovery);
return false;
"tdb_check: invalid recovery offset %zu",
(size_t)*recovery);
return false;
return false;
if (rec_magic(&rec) != TDB_CHAIN_MAGIC) {
return false;
if (rec_magic(&rec) != TDB_CHAIN_MAGIC) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: Bad hash chain magic %llu",
(long long)rec_magic(&rec));
return false;
}
if (rec_data_length(&rec) != sizeof(struct tdb_chain)) {
"tdb_check: Bad hash chain magic %llu",
(long long)rec_magic(&rec));
return false;
}
if (rec_data_length(&rec) != sizeof(struct tdb_chain)) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: Bad hash chain length %llu vs %zu",
(long long)rec_data_length(&rec),
sizeof(struct tdb_chain));
return false;
}
if (rec_key_length(&rec) != 0) {
"tdb_check: Bad hash chain length %llu vs %zu",
(long long)rec_data_length(&rec),
sizeof(struct tdb_chain));
return false;
}
if (rec_key_length(&rec) != 0) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: Bad hash chain key length %llu",
(long long)rec_key_length(&rec));
return false;
}
if (rec_hash(&rec) != 0) {
"tdb_check: Bad hash chain key length %llu",
(long long)rec_key_length(&rec));
return false;
}
if (rec_hash(&rec) != 0) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: Bad hash chain hash value %llu",
(long long)rec_hash(&rec));
return false;
"tdb_check: Bad hash chain hash value %llu",
(long long)rec_hash(&rec));
return false;
return false;
if (rec_magic(&rec) != TDB_HTABLE_MAGIC) {
return false;
if (rec_magic(&rec) != TDB_HTABLE_MAGIC) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: Bad hash table magic %llu",
(long long)rec_magic(&rec));
return false;
}
if (rec_data_length(&rec)
!= sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS) {
"tdb_check: Bad hash table magic %llu",
(long long)rec_magic(&rec));
return false;
}
if (rec_data_length(&rec)
!= sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: Bad hash table length %llu vs %llu",
(long long)rec_data_length(&rec),
(long long)sizeof(tdb_off_t)
"tdb_check: Bad hash table length %llu vs %llu",
(long long)rec_data_length(&rec),
(long long)sizeof(tdb_off_t)
return false;
}
if (rec_key_length(&rec) != 0) {
return false;
}
if (rec_key_length(&rec) != 0) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: Bad hash table key length %llu",
(long long)rec_key_length(&rec));
return false;
}
if (rec_hash(&rec) != 0) {
"tdb_check: Bad hash table key length %llu",
(long long)rec_key_length(&rec));
return false;
}
if (rec_hash(&rec) != 0) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: Bad hash table hash value %llu",
(long long)rec_hash(&rec));
return false;
"tdb_check: Bad hash table hash value %llu",
(long long)rec_hash(&rec));
return false;
p = asearch(&off, used, num_used, off_cmp);
if (!p) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
p = asearch(&off, used, num_used, off_cmp);
if (!p) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"tdb_check: Invalid offset %llu "
"in hash", (long long)off);
goto fail;
"tdb_check: Invalid offset %llu "
"in hash", (long long)off);
goto fail;
/* Chained entries are unordered. */
if (is_subhash(group[b])) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
/* Chained entries are unordered. */
if (is_subhash(group[b])) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"tdb_check: Invalid chain"
" entry subhash");
goto fail;
"tdb_check: Invalid chain"
" entry subhash");
goto fail;
h = hash_record(tdb, off);
if (h != hprefix) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
h = hash_record(tdb, off);
if (h != hprefix) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"check: bad hash chain"
" placement"
" 0x%llx vs 0x%llx",
"check: bad hash chain"
" placement"
" 0x%llx vs 0x%llx",
if (get_bits(h, hprefix_bits, &used_bits) != hprefix
&& hprefix_bits) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
if (get_bits(h, hprefix_bits, &used_bits) != hprefix
&& hprefix_bits) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"check: bad hash placement"
" 0x%llx vs 0x%llx",
(long long)h, (long long)hprefix);
"check: bad hash placement"
" 0x%llx vs 0x%llx",
(long long)h, (long long)hprefix);
/* Does it belong in this group? */
if (get_bits(h, group_bits, &used_bits) != g) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
/* Does it belong in this group? */
if (get_bits(h, group_bits, &used_bits) != g) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"check: bad group %llu vs %u",
(long long)h, g);
goto fail;
"check: bad group %llu vs %u",
(long long)h, g);
goto fail;
!= bucket) {
used_bits -= TDB_HASH_GROUP_BITS;
tdb_logerr(tdb, TDB_ERR_CORRUPT,
!= bucket) {
used_bits -= TDB_HASH_GROUP_BITS;
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"check: bad bucket %u vs %u",
(unsigned)get_bits(h,
TDB_HASH_GROUP_BITS,
"check: bad bucket %u vs %u",
(unsigned)get_bits(h,
TDB_HASH_GROUP_BITS,
i = (i + 1) % (1 << TDB_HASH_GROUP_BITS)) {
if (group[i] == 0) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
i = (i + 1) % (1 << TDB_HASH_GROUP_BITS)) {
if (group[i] == 0) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"check: bad group placement"
" %u vs %u",
b, bucket);
"check: bad group placement"
" %u vs %u",
b, bucket);
/* Bottom bits must match header. */
if ((h & ((1 << 11)-1)) != rec_hash(&rec)) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
/* Bottom bits must match header. */
if ((h & ((1 << 11)-1)) != rec_hash(&rec)) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"tdb_check: Bad hash magic at"
" offset %llu (0x%llx vs 0x%llx)",
(long long)off,
"tdb_check: Bad hash magic at"
" offset %llu (0x%llx vs 0x%llx)",
(long long)off,
return false;
if (num_found != num_used) {
return false;
if (num_found != num_used) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: Not all entries are in hash");
return false;
}
"tdb_check: Not all entries are in hash");
return false;
}
unsigned int bucket)
{
if (frec_magic(frec) != TDB_FREE_MAGIC) {
unsigned int bucket)
{
if (frec_magic(frec) != TDB_FREE_MAGIC) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: offset %llu bad magic 0x%llx",
(long long)off, (long long)frec->magic_and_prev);
return false;
}
if (frec_ftable(frec) != ftable) {
"tdb_check: offset %llu bad magic 0x%llx",
(long long)off, (long long)frec->magic_and_prev);
return false;
}
if (frec_ftable(frec) != ftable) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: offset %llu bad freetable %u",
(long long)off, frec_ftable(frec));
return false;
"tdb_check: offset %llu bad freetable %u",
(long long)off, frec_ftable(frec));
return false;
false))
return false;
if (size_to_bucket(frec_len(frec)) != bucket) {
false))
return false;
if (size_to_bucket(frec_len(frec)) != bucket) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: offset %llu in wrong bucket %u vs %u",
(long long)off,
bucket, size_to_bucket(frec_len(frec)));
return false;
}
if (prev != frec_prev(frec)) {
"tdb_check: offset %llu in wrong bucket %u vs %u",
(long long)off,
bucket, size_to_bucket(frec_len(frec)));
return false;
}
if (prev != frec_prev(frec)) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: offset %llu bad prev %llu vs %llu",
(long long)off,
(long long)prev, (long long)frec_len(frec));
"tdb_check: offset %llu bad prev %llu vs %llu",
(long long)off,
(long long)prev, (long long)frec_len(frec));
|| rec_key_length(&ft.hdr) != 0
|| rec_data_length(&ft.hdr) != sizeof(ft) - sizeof(ft.hdr)
|| rec_hash(&ft.hdr) != 0) {
|| rec_key_length(&ft.hdr) != 0
|| rec_data_length(&ft.hdr) != sizeof(ft) - sizeof(ft.hdr)
|| rec_hash(&ft.hdr) != 0) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: Invalid header on free table");
return false;
}
"tdb_check: Invalid header on free table");
return false;
}
p = asearch(&off, fr, num_free, off_cmp);
if (!p) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
p = asearch(&off, fr, num_free, off_cmp);
if (!p) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"tdb_check: Invalid offset"
" %llu in free table",
(long long)off);
"tdb_check: Invalid offset"
" %llu in free table",
(long long)off);
len = dead_space(tdb, off);
if (len < sizeof(rec.r)) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
len = dead_space(tdb, off);
if (len < sizeof(rec.r)) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"tdb_check: invalid dead"
" space at %zu",
(size_t)off);
return false;
}
"tdb_check: invalid dead"
" space at %zu",
(size_t)off);
return false;
}
- tdb_logerr(tdb, TDB_SUCCESS, TDB_DEBUG_WARNING,
+ tdb_logerr(tdb, TDB_SUCCESS, TDB_LOG_WARNING,
"Dead space at %zu-%zu (of %zu)",
(size_t)off, (size_t)(off + len),
(size_t)tdb->map_size);
"Dead space at %zu-%zu (of %zu)",
(size_t)off, (size_t)(off + len),
(size_t)tdb->map_size);
return false;
if (recovery != off) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
return false;
if (recovery != off) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"tdb_check: unexpected recovery"
" record at offset %zu",
(size_t)off);
"tdb_check: unexpected recovery"
" record at offset %zu",
(size_t)off);
}
if (rec.r.len > rec.r.max_len) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
}
if (rec.r.len > rec.r.max_len) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"tdb_check: invalid recovery length"
" %zu", (size_t)rec.r.len);
return false;
}
if (rec.r.eof > tdb->map_size) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"tdb_check: invalid recovery length"
" %zu", (size_t)rec.r.len);
return false;
}
if (rec.r.eof > tdb->map_size) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"tdb_check: invalid old EOF"
" %zu", (size_t)rec.r.eof);
return false;
"tdb_check: invalid old EOF"
" %zu", (size_t)rec.r.eof);
return false;
len = sizeof(rec.u) + frec_len(&rec.f);
if (off + len > tdb->map_size) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
len = sizeof(rec.u) + frec_len(&rec.f);
if (off + len > tdb->map_size) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"tdb_check: free overlength %llu"
" at offset %llu",
(long long)len, (long long)off);
"tdb_check: free overlength %llu"
" at offset %llu",
(long long)len, (long long)off);
if (frec_ftable(&rec.f) != TDB_FTABLE_NONE
&& !append(fr, num_free, off)) {
tdb_logerr(tdb, TDB_ERR_OOM,
if (frec_ftable(&rec.f) != TDB_FTABLE_NONE
&& !append(fr, num_free, off)) {
tdb_logerr(tdb, TDB_ERR_OOM,
"tdb_check: tracking %zu'th"
" free record.", *num_free);
return false;
"tdb_check: tracking %zu'th"
" free record.", *num_free);
return false;
/* This record is used! */
if (!append(used, num_used, off)) {
tdb_logerr(tdb, TDB_ERR_OOM,
/* This record is used! */
if (!append(used, num_used, off)) {
tdb_logerr(tdb, TDB_ERR_OOM,
"tdb_check: tracking %zu'th"
" used record.", *num_used);
return false;
"tdb_check: tracking %zu'th"
" used record.", *num_used);
return false;
len = sizeof(rec.u) + klen + dlen + extra;
if (off + len > tdb->map_size) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
len = sizeof(rec.u) + klen + dlen + extra;
if (off + len > tdb->map_size) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"tdb_check: used overlength %llu"
" at offset %llu",
(long long)len, (long long)off);
"tdb_check: used overlength %llu"
" at offset %llu",
(long long)len, (long long)off);
if (len < sizeof(rec.f)) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
if (len < sizeof(rec.f)) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"tdb_check: too short record %llu"
" at %llu",
(long long)len, (long long)off);
"tdb_check: too short record %llu"
" at %llu",
(long long)len, (long long)off);
}
} else {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
}
} else {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"tdb_check: Bad magic 0x%llx at offset %zu",
(long long)rec_magic(&rec.u), (size_t)off);
return false;
"tdb_check: Bad magic 0x%llx at offset %zu",
(long long)rec_magic(&rec.u), (size_t)off);
return false;
/* We must have found recovery area if there was one. */
if (recovery != 0 && !found_recovery) {
/* We must have found recovery area if there was one. */
if (recovery != 0 && !found_recovery) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: expected a recovery area at %zu",
(size_t)recovery);
return false;
"tdb_check: expected a recovery area at %zu",
(size_t)recovery);
return false;
goto fail;
if (num_found != num_free) {
goto fail;
if (num_found != num_free) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check: Not all entries are in free table");
return -1;
}
"tdb_check: Not all entries are in free table");
return -1;
}
#ifdef CCAN_TDB2_DEBUG
if (tdb_read_off(tdb, off) != r_off) {
#ifdef CCAN_TDB2_DEBUG
if (tdb_read_off(tdb, off) != r_off) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"remove_from_list: %llu bad prev in list %llu",
(long long)r_off, (long long)b_off);
return -1;
"remove_from_list: %llu bad prev in list %llu",
(long long)r_off, (long long)b_off);
return -1;
#ifdef CCAN_TDB2_DEBUG
if (tdb_read_off(tdb, off) & TDB_OFF_MASK != r_off) {
#ifdef CCAN_TDB2_DEBUG
if (tdb_read_off(tdb, off) & TDB_OFF_MASK != r_off) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"remove_from_list: %llu bad list %llu",
(long long)r_off, (long long)b_off);
return -1;
"remove_from_list: %llu bad list %llu",
(long long)r_off, (long long)b_off);
return -1;
new.next + offsetof(struct tdb_free_record,
magic_and_prev))
!= magic) {
new.next + offsetof(struct tdb_free_record,
magic_and_prev))
!= magic) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"enqueue_in_free: %llu bad head"
" prev %llu",
(long long)new.next, (long long)b_off);
"enqueue_in_free: %llu bad head"
" prev %llu",
(long long)new.next, (long long)b_off);
goto err;
if (frec_len(&rec) != data_len) {
goto err;
if (frec_len(&rec) != data_len) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"coalesce: expected data len %zu not %zu",
(size_t)data_len, (size_t)frec_len(&rec));
goto err;
"coalesce: expected data len %zu not %zu",
(size_t)data_len, (size_t)frec_len(&rec));
goto err;
if (frec_magic(r) != TDB_FREE_MAGIC) {
tdb_access_release(tdb, r);
if (frec_magic(r) != TDB_FREE_MAGIC) {
tdb_access_release(tdb, r);
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"lock_and_alloc: %llu non-free 0x%llx",
(long long)off, (long long)r->magic_and_prev);
goto unlock_err;
"lock_and_alloc: %llu non-free 0x%llx",
(long long)off, (long long)r->magic_and_prev);
goto unlock_err;
if (rec_key_length(rec) != keylen
|| rec_data_length(rec) != datalen
|| rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
if (rec_key_length(rec) != keylen
|| rec_data_length(rec) != datalen
|| rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"Could not encode k=%llu,d=%llu,a=%llu",
(long long)keylen, (long long)datalen,
(long long)actuallen);
"Could not encode k=%llu,d=%llu,a=%llu",
(long long)keylen, (long long)datalen,
(long long)actuallen);
/* Need to hold a hash lock to expand DB: transactions rely on it. */
if (!(tdb->flags & TDB_NOLOCK)
&& !tdb->allrecord_lock.count && !tdb_has_hash_locks(tdb)) {
/* Need to hold a hash lock to expand DB: transactions rely on it. */
if (!(tdb->flags & TDB_NOLOCK)
&& !tdb->allrecord_lock.count && !tdb_has_hash_locks(tdb)) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_expand: must hold lock during expand");
return -1;
}
"tdb_expand: must hold lock during expand");
return -1;
}
}
if (rec_magic(&rec) != TDB_USED_MAGIC) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
}
if (rec_magic(&rec) != TDB_USED_MAGIC) {
tdb_logerr(tdb, TDB_ERR_CORRUPT,
"next_in_hash:"
" corrupt record at %llu",
(long long)off);
"next_in_hash:"
" corrupt record at %llu",
(long long)off);
*/
if (tdb->map_ptr == MAP_FAILED) {
tdb->map_ptr = NULL;
*/
if (tdb->map_ptr == MAP_FAILED) {
tdb->map_ptr = NULL;
- tdb_logerr(tdb, TDB_SUCCESS, TDB_DEBUG_WARNING,
+ tdb_logerr(tdb, TDB_SUCCESS, TDB_LOG_WARNING,
"tdb_mmap failed for size %lld (%s)",
(long long)tdb->map_size, strerror(errno));
}
"tdb_mmap failed for size %lld (%s)",
(long long)tdb->map_size, strerror(errno));
}
return 0;
if (tdb->flags & TDB_INTERNAL) {
if (!probe) {
return 0;
if (tdb->flags & TDB_INTERNAL) {
if (!probe) {
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_oob len %lld beyond internal"
" malloc size %lld",
(long long)len,
"tdb_oob len %lld beyond internal"
" malloc size %lld",
(long long)len,
return -1;
if (fstat(tdb->fd, &st) != 0) {
return -1;
if (fstat(tdb->fd, &st) != 0) {
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"Failed to fstat file: %s", strerror(errno));
tdb_unlock_expand(tdb, F_RDLCK);
return -1;
"Failed to fstat file: %s", strerror(errno));
tdb_unlock_expand(tdb, F_RDLCK);
return -1;
if (st.st_size < (size_t)len) {
if (!probe) {
if (st.st_size < (size_t)len) {
if (!probe) {
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_oob len %zu beyond eof at %zu",
(size_t)len, st.st_size);
}
"tdb_oob len %zu beyond eof at %zu",
(size_t)len, st.st_size);
}
const void *buf, tdb_len_t len)
{
if (tdb->read_only) {
const void *buf, tdb_len_t len)
{
if (tdb->read_only) {
- tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_DEBUG_WARNING,
+ tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_LOG_USE_ERROR,
"Write to read-only database");
return -1;
}
"Write to read-only database");
return -1;
}
if (ret >= 0)
errno = ENOSPC;
if (ret >= 0)
errno = ENOSPC;
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_write: %zi at %zu len=%zu (%s)",
ret, (size_t)off, (size_t)len,
strerror(errno));
"tdb_write: %zi at %zu len=%zu (%s)",
ret, (size_t)off, (size_t)len,
strerror(errno));
} else {
ssize_t r = pread(tdb->fd, buf, len, off);
if (r != len) {
} else {
ssize_t r = pread(tdb->fd, buf, len, off);
if (r != len) {
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_read failed with %zi at %zu "
"len=%zu (%s) map_size=%zu",
r, (size_t)off, (size_t)len,
"tdb_read failed with %zi at %zu "
"len=%zu (%s) map_size=%zu",
r, (size_t)off, (size_t)len,
if (unlikely((tdb->flags & TDB_CONVERT))) {
void *conv = malloc(len);
if (!conv) {
if (unlikely((tdb->flags & TDB_CONVERT))) {
void *conv = malloc(len);
if (!conv) {
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"tdb_write: no memory converting"
" %zu bytes", len);
return -1;
"tdb_write: no memory converting"
" %zu bytes", len);
return -1;
int tdb_write_off(struct tdb_context *tdb, tdb_off_t off, tdb_off_t val)
{
if (tdb->read_only) {
int tdb_write_off(struct tdb_context *tdb, tdb_off_t off, tdb_off_t val)
{
if (tdb->read_only) {
- tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_DEBUG_WARNING,
+ tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_LOG_USE_ERROR,
"Write to read-only database");
return -1;
}
"Write to read-only database");
return -1;
}
/* some systems don't like zero length malloc */
buf = malloc(prefix + len ? prefix + len : 1);
if (!buf) {
/* some systems don't like zero length malloc */
buf = malloc(prefix + len ? prefix + len : 1);
if (!buf) {
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_USE_ERROR,
"tdb_alloc_read malloc failed len=%zu",
(size_t)(prefix + len));
} else if (unlikely(tdb->methods->tread(tdb, offset, buf+prefix, len)
"tdb_alloc_read malloc failed len=%zu",
(size_t)(prefix + len));
} else if (unlikely(tdb->methods->tread(tdb, offset, buf+prefix, len)
if (ret >= 0)
errno = ENOSPC;
if (ret >= 0)
errno = ENOSPC;
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"fill failed: %zi at %zu len=%zu (%s)",
ret, (size_t)off, (size_t)len,
strerror(errno));
"fill failed: %zi at %zu len=%zu (%s)",
ret, (size_t)off, (size_t)len,
strerror(errno));
char buf[8192];
if (tdb->read_only) {
char buf[8192];
if (tdb->read_only) {
- tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_DEBUG_WARNING,
+ tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_LOG_USE_ERROR,
"Expand on read-only database");
return -1;
}
"Expand on read-only database");
return -1;
}
if (tdb->flags & TDB_INTERNAL) {
char *new = realloc(tdb->map_ptr, tdb->map_size + addition);
if (!new) {
if (tdb->flags & TDB_INTERNAL) {
char *new = realloc(tdb->map_ptr, tdb->map_size + addition);
if (!new) {
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"No memory to expand database");
return -1;
}
"No memory to expand database");
return -1;
}
void *ret = NULL;
if (tdb->read_only) {
void *ret = NULL;
if (tdb->read_only) {
- tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_DEBUG_WARNING,
+ tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_LOG_USE_ERROR,
"Write to read-only database");
return NULL;
}
"Write to read-only database");
return NULL;
}
}
if (rw_type == F_WRLCK && tdb->read_only) {
}
if (rw_type == F_WRLCK && tdb->read_only) {
- tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_DEBUG_WARNING,
+ tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_LOG_USE_ERROR,
"Write lock attempted on read-only database");
return -1;
}
"Write lock attempted on read-only database");
return -1;
}
/* A 32 bit system cannot open a 64-bit file, but it could have
* expanded since then: check here. */
if ((size_t)(offset + len) != offset + len) {
/* A 32 bit system cannot open a 64-bit file, but it could have
* expanded since then: check here. */
if ((size_t)(offset + len) != offset + len) {
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_brlock: lock on giant offset %llu",
(long long)(offset + len));
return -1;
"tdb_brlock: lock on giant offset %llu",
(long long)(offset + len));
return -1;
* EAGAIN is an expected return from non-blocking
* locks. */
if (!(flags & TDB_LOCK_PROBE) && errno != EAGAIN) {
* EAGAIN is an expected return from non-blocking
* locks. */
if (!(flags & TDB_LOCK_PROBE) && errno != EAGAIN) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_brlock failed (fd=%d) at"
" offset %zu rw_type=%d flags=%d len=%zu:"
" %s",
"tdb_brlock failed (fd=%d) at"
" offset %zu rw_type=%d flags=%d len=%zu:"
" %s",
} while (ret == -1 && errno == EINTR);
if (ret == -1) {
} while (ret == -1 && errno == EINTR);
if (ret == -1) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_brunlock failed (fd=%d) at offset %zu"
" rw_type=%d len=%zu",
tdb->fd, (size_t)offset, rw_type, (size_t)len);
"tdb_brunlock failed (fd=%d) at offset %zu"
" rw_type=%d len=%zu",
tdb->fd, (size_t)offset, rw_type, (size_t)len);
int count = 1000;
if (tdb->allrecord_lock.count != 1) {
int count = 1000;
if (tdb->allrecord_lock.count != 1) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_allrecord_upgrade failed: count %u too high",
tdb->allrecord_lock.count);
return -1;
}
if (tdb->allrecord_lock.off != 1) {
"tdb_allrecord_upgrade failed: count %u too high",
tdb->allrecord_lock.count);
return -1;
}
if (tdb->allrecord_lock.off != 1) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_allrecord_upgrade failed: already upgraded?");
return -1;
}
"tdb_allrecord_upgrade failed: already upgraded?");
return -1;
}
tv.tv_usec = 1;
select(0, NULL, NULL, NULL, &tv);
}
tv.tv_usec = 1;
select(0, NULL, NULL, NULL, &tv);
}
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_allrecord_upgrade failed");
return -1;
}
"tdb_allrecord_upgrade failed");
return -1;
}
struct tdb_lock_type *new_lck;
if (offset > TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE + tdb->map_size / 8) {
struct tdb_lock_type *new_lck;
if (offset > TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE + tdb->map_size / 8) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_nest_lock: invalid offset %zu ltype=%d",
(size_t)offset, ltype);
return -1;
"tdb_nest_lock: invalid offset %zu ltype=%d",
(size_t)offset, ltype);
return -1;
new_lck = find_nestlock(tdb, offset);
if (new_lck) {
if (new_lck->ltype == F_RDLCK && ltype == F_WRLCK) {
new_lck = find_nestlock(tdb, offset);
if (new_lck) {
if (new_lck->ltype == F_RDLCK && ltype == F_WRLCK) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_nest_lock: offset %zu has read lock",
(size_t)offset);
return -1;
"tdb_nest_lock: offset %zu has read lock",
(size_t)offset);
return -1;
if (tdb->num_lockrecs
&& offset >= TDB_HASH_LOCK_START
&& offset < TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE) {
if (tdb->num_lockrecs
&& offset >= TDB_HASH_LOCK_START
&& offset < TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_nest_lock: already have a hash lock?");
return -1;
}
"tdb_nest_lock: already have a hash lock?");
return -1;
}
tdb->lockrecs,
sizeof(*tdb->lockrecs) * (tdb->num_lockrecs+1));
if (new_lck == NULL) {
tdb->lockrecs,
sizeof(*tdb->lockrecs) * (tdb->num_lockrecs+1));
if (new_lck == NULL) {
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"tdb_nest_lock: unable to allocate %zu lock struct",
tdb->num_lockrecs + 1);
errno = ENOMEM;
"tdb_nest_lock: unable to allocate %zu lock struct",
tdb->num_lockrecs + 1);
errno = ENOMEM;
lck = find_nestlock(tdb, off);
if ((lck == NULL) || (lck->count == 0)) {
lck = find_nestlock(tdb, off);
if ((lck == NULL) || (lck->count == 0)) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_nest_unlock: no lock for %zu", (size_t)off);
return -1;
}
"tdb_nest_unlock: no lock for %zu", (size_t)off);
return -1;
}
{
/* FIXME: There are no locks on read-only dbs */
if (tdb->read_only) {
{
/* FIXME: There are no locks on read-only dbs */
if (tdb->read_only) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
"tdb_allrecord_lock: read-only");
return -1;
}
"tdb_allrecord_lock: read-only");
return -1;
}
if (tdb->allrecord_lock.count) {
/* a global lock of a different type exists */
if (tdb->allrecord_lock.count) {
/* a global lock of a different type exists */
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
"tdb_allrecord_lock: already have %s lock",
tdb->allrecord_lock.ltype == F_RDLCK
? "read" : "write");
"tdb_allrecord_lock: already have %s lock",
tdb->allrecord_lock.ltype == F_RDLCK
? "read" : "write");
if (tdb_has_hash_locks(tdb)) {
/* can't combine global and chain locks */
if (tdb_has_hash_locks(tdb)) {
/* can't combine global and chain locks */
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
"tdb_allrecord_lock: already have chain lock");
return -1;
}
if (upgradable && ltype != F_RDLCK) {
/* tdb error: you can't upgrade a write lock! */
"tdb_allrecord_lock: already have chain lock");
return -1;
}
if (upgradable && ltype != F_RDLCK) {
/* tdb error: you can't upgrade a write lock! */
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_allrecord_lock: can't upgrade a write lock");
return -1;
}
"tdb_allrecord_lock: can't upgrade a write lock");
return -1;
}
if (tdb_lock_gradual(tdb, ltype, flags, TDB_HASH_LOCK_START,
TDB_HASH_LOCK_RANGE)) {
if (!(flags & TDB_LOCK_PROBE)) {
if (tdb_lock_gradual(tdb, ltype, flags, TDB_HASH_LOCK_START,
TDB_HASH_LOCK_RANGE)) {
if (!(flags & TDB_LOCK_PROBE)) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_allrecord_lock hashes failed");
}
return -1;
"tdb_allrecord_lock hashes failed");
}
return -1;
if (tdb_brlock(tdb, ltype, TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE,
0, flags)) {
if (!(flags & TDB_LOCK_PROBE)) {
if (tdb_brlock(tdb, ltype, TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE,
0, flags)) {
if (!(flags & TDB_LOCK_PROBE)) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_allrecord_lock freetables failed");
}
tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START,
"tdb_allrecord_lock freetables failed");
}
tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START,
int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype)
{
if (tdb->allrecord_lock.count == 0) {
int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype)
{
if (tdb->allrecord_lock.count == 0) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
"tdb_allrecord_unlock: not locked!");
return -1;
}
"tdb_allrecord_unlock: not locked!");
return -1;
}
/* Upgradable locks are marked as write locks. */
if (tdb->allrecord_lock.ltype != ltype
&& (!tdb->allrecord_lock.off || ltype != F_RDLCK)) {
/* Upgradable locks are marked as write locks. */
if (tdb->allrecord_lock.ltype != ltype
&& (!tdb->allrecord_lock.off || ltype != F_RDLCK)) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_allrecord_unlock: have %s lock",
tdb->allrecord_lock.ltype == F_RDLCK
? "read" : "write");
"tdb_allrecord_unlock: have %s lock",
tdb->allrecord_lock.ltype == F_RDLCK
? "read" : "write");
}
if (tdb->allrecord_lock.count) {
}
if (tdb->allrecord_lock.count) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
"tdb_lock_hashes: already have %s allrecordlock",
tdb->allrecord_lock.ltype == F_RDLCK
? "read" : "write");
"tdb_lock_hashes: already have %s allrecordlock",
tdb->allrecord_lock.ltype == F_RDLCK
? "read" : "write");
}
if (tdb_has_free_lock(tdb)) {
}
if (tdb_has_free_lock(tdb)) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_lock_hashes: already have free lock");
return -1;
}
if (tdb_has_expansion_lock(tdb)) {
"tdb_lock_hashes: already have free lock");
return -1;
}
if (tdb_has_expansion_lock(tdb)) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_lock_hashes: already have expansion lock");
return -1;
}
"tdb_lock_hashes: already have expansion lock");
return -1;
}
if (tdb->allrecord_lock.count) {
if (tdb->allrecord_lock.ltype == F_RDLCK
&& ltype == F_WRLCK) {
if (tdb->allrecord_lock.count) {
if (tdb->allrecord_lock.ltype == F_RDLCK
&& ltype == F_WRLCK) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_unlock_hashes RO allrecord!");
return -1;
}
"tdb_unlock_hashes RO allrecord!");
return -1;
}
if (tdb->allrecord_lock.count) {
if (tdb->allrecord_lock.ltype == F_WRLCK)
return 0;
if (tdb->allrecord_lock.count) {
if (tdb->allrecord_lock.ltype == F_WRLCK)
return 0;
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_lock_free_bucket with RO allrecordlock!");
return -1;
}
#if 0 /* FIXME */
if (tdb_has_expansion_lock(tdb)) {
"tdb_lock_free_bucket with RO allrecordlock!");
return -1;
}
#if 0 /* FIXME */
if (tdb_has_expansion_lock(tdb)) {
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_lock_free_bucket: already have expansion lock");
return -1;
}
"tdb_lock_free_bucket: already have expansion lock");
return -1;
}
uint32_t flags;
/* Logging function */
uint32_t flags;
/* Logging function */
+ void (*logfn)(struct tdb_context *tdb,
+ enum tdb_log_level level,
+ void *log_private,
+ const char *message);
void *log_private;
/* Hash function. */
void *log_private;
/* Hash function. */
/* tdb.c: */
void COLD tdb_logerr(struct tdb_context *tdb,
enum TDB_ERROR ecode,
/* tdb.c: */
void COLD tdb_logerr(struct tdb_context *tdb,
enum TDB_ERROR ecode,
- enum tdb_debug_level level,
+ enum tdb_log_level level,
const char *fmt, ...);
#ifdef TDB_TRACE
const char *fmt, ...);
#ifdef TDB_TRACE
chains = tally_new(HISTO_HEIGHT);
if (!ftables || !hashes || !freet || !keys || !data || !extra
|| !uncoal || !buckets || !chains) {
chains = tally_new(HISTO_HEIGHT);
if (!ftables || !hashes || !freet || !keys || !data || !extra
|| !uncoal || !buckets || !chains) {
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"tdb_summary: failed to allocate tally structures");
goto unlock;
}
"tdb_summary: failed to allocate tally structures");
goto unlock;
}
fd = open("/dev/urandom", O_RDONLY);
if (fd >= 0) {
if (read_all(fd, &ret, sizeof(ret))) {
fd = open("/dev/urandom", O_RDONLY);
if (fd >= 0) {
if (read_all(fd, &ret, sizeof(ret))) {
- tdb_logerr(tdb, TDB_SUCCESS, TDB_DEBUG_TRACE,
- "tdb_open: random from /dev/urandom");
char reply[1 + sizeof(uint64_t)];
int r = read(fd, reply, sizeof(reply));
if (r > 1) {
char reply[1 + sizeof(uint64_t)];
int r = read(fd, reply, sizeof(reply));
if (r > 1) {
- tdb_logerr(tdb, TDB_SUCCESS, TDB_DEBUG_TRACE,
- "tdb_open: %u random bytes from"
- " /dev/egd-pool", r-1);
/* Copy at least some bytes. */
memcpy(&ret, reply+1, r - 1);
if (reply[0] == sizeof(uint64_t)
/* Copy at least some bytes. */
memcpy(&ret, reply+1, r - 1);
if (reply[0] == sizeof(uint64_t)
/* Fallback: pid and time. */
gettimeofday(&now, NULL);
ret = getpid() * 100132289ULL + now.tv_sec * 1000000ULL + now.tv_usec;
/* Fallback: pid and time. */
gettimeofday(&now, NULL);
ret = getpid() * 100132289ULL + now.tv_sec * 1000000ULL + now.tv_usec;
- tdb_logerr(tdb, TDB_SUCCESS, TDB_DEBUG_TRACE,
+ tdb_logerr(tdb, TDB_SUCCESS, TDB_LOG_WARNING,
"tdb_open: random from getpid and time");
return ret;
}
"tdb_open: random from getpid and time");
return ret;
}
tdb->map_size = sizeof(newdb);
tdb->map_ptr = malloc(tdb->map_size);
if (!tdb->map_ptr) {
tdb->map_size = sizeof(newdb);
tdb->map_ptr = malloc(tdb->map_size);
if (!tdb->map_ptr) {
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"tdb_new_database: failed to allocate");
return -1;
}
"tdb_new_database: failed to allocate");
return -1;
}
if (rlen != sizeof(newdb)) {
if (rlen >= 0)
errno = ENOSPC;
if (rlen != sizeof(newdb)) {
if (rlen >= 0)
errno = ENOSPC;
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_new_database: %zi writing header: %s",
rlen, strerror(errno));
return -1;
"tdb_new_database: %zi writing header: %s",
rlen, strerror(errno));
return -1;
tdb->stats->size = sizeof(attr->stats);
break;
default:
tdb->stats->size = sizeof(attr->stats);
break;
default:
- tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_LOG_USE_ERROR,
"tdb_open: unknown attribute type %u",
attr->base.attr);
goto fail;
"tdb_open: unknown attribute type %u",
attr->base.attr);
goto fail;
}
if ((open_flags & O_ACCMODE) == O_WRONLY) {
}
if ((open_flags & O_ACCMODE) == O_WRONLY) {
- tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_LOG_USE_ERROR,
"tdb_open: can't open tdb %s write-only", name);
goto fail;
}
"tdb_open: can't open tdb %s write-only", name);
goto fail;
}
if ((tdb->fd = open(name, open_flags, mode)) == -1) {
/* errno set by open(2) */
saved_errno = errno;
if ((tdb->fd = open(name, open_flags, mode)) == -1) {
/* errno set by open(2) */
saved_errno = errno;
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_open: could not open file %s: %s",
name, strerror(errno));
goto fail;
"tdb_open: could not open file %s: %s",
name, strerror(errno));
goto fail;
goto fail;
}
} else if (rlen < 0) {
goto fail;
}
} else if (rlen < 0) {
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_open: error %s reading %s",
strerror(errno), name);
goto fail;
} else if (rlen < sizeof(hdr)
|| strcmp(hdr.magic_food, TDB_MAGIC_FOOD) != 0) {
"tdb_open: error %s reading %s",
strerror(errno), name);
goto fail;
} else if (rlen < sizeof(hdr)
|| strcmp(hdr.magic_food, TDB_MAGIC_FOOD) != 0) {
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_open: %s is not a tdb file", name);
goto fail;
}
"tdb_open: %s is not a tdb file", name);
goto fail;
}
tdb->flags |= TDB_CONVERT;
else {
/* wrong version */
tdb->flags |= TDB_CONVERT;
else {
/* wrong version */
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_open: %s is unknown version 0x%llx",
name, (long long)hdr.version);
goto fail;
"tdb_open: %s is unknown version 0x%llx",
name, (long long)hdr.version);
goto fail;
hash_test = tdb_hash(tdb, &hash_test, sizeof(hash_test));
if (hdr.hash_test != hash_test) {
/* wrong hash variant */
hash_test = tdb_hash(tdb, &hash_test, sizeof(hash_test));
if (hdr.hash_test != hash_test) {
/* wrong hash variant */
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_open: %s uses a different hash function",
name);
goto fail;
"tdb_open: %s uses a different hash function",
name);
goto fail;
if (fstat(tdb->fd, &st) == -1) {
saved_errno = errno;
if (fstat(tdb->fd, &st) == -1) {
saved_errno = errno;
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_open: could not stat open %s: %s",
name, strerror(errno));
goto fail;
"tdb_open: could not stat open %s: %s",
name, strerror(errno));
goto fail;
/* Is it already in the open list? If so, fail. */
if (tdb_already_open(st.st_dev, st.st_ino)) {
/* FIXME */
/* Is it already in the open list? If so, fail. */
if (tdb_already_open(st.st_dev, st.st_ino)) {
/* FIXME */
- tdb_logerr(tdb, TDB_ERR_NESTING, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_NESTING, TDB_LOG_USE_ERROR,
"tdb_open: %s (%d,%d) is already open in this"
" process",
name, (int)st.st_dev, (int)st.st_ino);
"tdb_open: %s (%d,%d) is already open in this"
" process",
name, (int)st.st_dev, (int)st.st_ino);
tdb->name = strdup(name);
if (!tdb->name) {
tdb->name = strdup(name);
if (!tdb->name) {
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"tdb_open: failed to allocate name");
goto fail;
}
"tdb_open: failed to allocate name");
goto fail;
}
free((char *)tdb->name);
if (tdb->fd != -1)
if (close(tdb->fd) != 0)
free((char *)tdb->name);
if (tdb->fd != -1)
if (close(tdb->fd) != 0)
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_open: failed to close tdb->fd"
"tdb_open: failed to close tdb->fd"
+ " on error: %s", strerror(errno));
free(tdb);
errno = saved_errno;
return NULL;
free(tdb);
errno = saved_errno;
return NULL;
/* Slow path. */
newdata = malloc(key.dsize + old_dlen + dbuf.dsize);
if (!newdata) {
/* Slow path. */
newdata = malloc(key.dsize + old_dlen + dbuf.dsize);
if (!newdata) {
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"tdb_append: failed to allocate %zu bytes",
(size_t)(key.dsize+old_dlen+dbuf.dsize));
goto fail;
"tdb_append: failed to allocate %zu bytes",
(size_t)(key.dsize+old_dlen+dbuf.dsize));
goto fail;
void COLD tdb_logerr(struct tdb_context *tdb,
enum TDB_ERROR ecode,
void COLD tdb_logerr(struct tdb_context *tdb,
enum TDB_ERROR ecode,
- enum tdb_debug_level level,
+ enum tdb_log_level level,
const char *fmt, ...)
{
char *message;
const char *fmt, ...)
{
char *message;
message = malloc(len + 1);
if (!message) {
message = malloc(len + 1);
if (!message) {
- tdb->logfn(tdb, TDB_DEBUG_ERROR, tdb->log_private,
+ tdb->logfn(tdb, TDB_LOG_ERROR, tdb->log_private,
"out of memory formatting message:");
tdb->logfn(tdb, level, tdb->log_private, fmt);
return;
"out of memory formatting message:");
tdb->logfn(tdb, level, tdb->log_private, fmt);
return;
enum tdb_summary_flags { TDB_SUMMARY_HISTOGRAMS = 1 };
/* logging uses one of the following levels */
enum tdb_summary_flags { TDB_SUMMARY_HISTOGRAMS = 1 };
/* logging uses one of the following levels */
-enum tdb_debug_level {TDB_DEBUG_FATAL = 0, TDB_DEBUG_ERROR,
- TDB_DEBUG_WARNING, TDB_DEBUG_TRACE};
+enum tdb_log_level {TDB_LOG_ERROR = 0, TDB_LOG_USE_ERROR, TDB_LOG_WARNING};
typedef struct tdb_data {
unsigned char *dptr;
typedef struct tdb_data {
unsigned char *dptr;
/* FIXME: Make typesafe */
typedef int (*tdb_traverse_func)(struct tdb_context *, TDB_DATA, TDB_DATA, void *);
/* FIXME: Make typesafe */
typedef int (*tdb_traverse_func)(struct tdb_context *, TDB_DATA, TDB_DATA, void *);
-typedef void (*tdb_logfn_t)(struct tdb_context *, enum tdb_debug_level, void *, const char *);
typedef uint64_t (*tdb_hashfn_t)(const void *key, size_t len, uint64_t seed,
void *priv);
typedef uint64_t (*tdb_hashfn_t)(const void *key, size_t len, uint64_t seed,
void *priv);
struct tdb_attribute_log {
struct tdb_attribute_base base; /* .attr = TDB_ATTRIBUTE_LOG */
struct tdb_attribute_log {
struct tdb_attribute_base base; /* .attr = TDB_ATTRIBUTE_LOG */
+ void (*log_fn)(struct tdb_context *tdb,
+ enum tdb_log_level level,
+ void *log_private,
+ const char *message);
};
void tap_log_fn(struct tdb_context *tdb,
};
void tap_log_fn(struct tdb_context *tdb,
- enum tdb_debug_level level, void *priv,
+ enum tdb_log_level level, void *priv,
const char *message)
{
if (suppress_logging)
return;
diag("tdb log level %u: %s%s", level, log_prefix, message);
const char *message)
{
if (suppress_logging)
return;
diag("tdb log level %u: %s%s", level, log_prefix, message);
- if (level != TDB_DEBUG_TRACE)
- tap_log_messages++;
extern union tdb_attribute tap_log_attr;
void tap_log_fn(struct tdb_context *tdb,
extern union tdb_attribute tap_log_attr;
void tap_log_fn(struct tdb_context *tdb,
- enum tdb_debug_level level, void *priv,
+ enum tdb_log_level level, void *priv,
const char *message);
static inline bool data_equal(struct tdb_data a, struct tdb_data b)
const char *message);
static inline bool data_equal(struct tdb_data a, struct tdb_data b)
#include "logging.h"
/* FIXME: Check these! */
#include "logging.h"
/* FIXME: Check these! */
-#define INITIAL_TDB_MALLOC "tdb.c", 182, FAILTEST_MALLOC
-#define LOGGING_MALLOC "tdb.c", 739, FAILTEST_MALLOC
+#define INITIAL_TDB_MALLOC "tdb.c", 177, FAILTEST_MALLOC
+#define LOGGING_MALLOC "tdb.c", 734, FAILTEST_MALLOC
#define URANDOM_OPEN "tdb.c", 49, FAILTEST_OPEN
#define URANDOM_READ "tdb.c", 29, FAILTEST_READ
#define URANDOM_OPEN "tdb.c", 49, FAILTEST_OPEN
#define URANDOM_READ "tdb.c", 29, FAILTEST_READ
/* Normally we get a log when setting random seed. */
static void my_log_fn(struct tdb_context *tdb,
/* Normally we get a log when setting random seed. */
static void my_log_fn(struct tdb_context *tdb,
- enum tdb_debug_level level, void *priv,
+ enum tdb_log_level level, void *priv,
const char *message)
{
log_count++;
const char *message)
{
log_count++;
- if (level != TDB_DEBUG_TRACE)
- error_count++;
-
va_start(ap, format);
vfprintf(stdout, format, ap);
va_end(ap);
fflush(stdout);
#if 0
va_start(ap, format);
vfprintf(stdout, format, ap);
va_end(ap);
fflush(stdout);
#if 0
- if (level != TDB_DEBUG_TRACE) {
char *ptr;
signal(SIGUSR1, SIG_IGN);
asprintf(&ptr,"xterm -e gdb /proc/%d/exe %d", getpid(), getpid());
char *ptr;
signal(SIGUSR1, SIG_IGN);
asprintf(&ptr,"xterm -e gdb /proc/%d/exe %d", getpid(), getpid());
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"transaction_read: failed at off=%zu len=%zu",
(size_t)off, (size_t)len);
tdb->transaction->transaction_error = 1;
"transaction_read: failed at off=%zu len=%zu",
(size_t)off, (size_t)len);
tdb->transaction->transaction_error = 1;
/* Only a commit is allowed on a prepared transaction */
if (tdb->transaction->prepared) {
/* Only a commit is allowed on a prepared transaction */
if (tdb->transaction->prepared) {
- tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_LOG_ERROR,
"transaction_write: transaction already prepared,"
" write not allowed");
goto fail;
"transaction_write: transaction already prepared,"
" write not allowed");
goto fail;
(blk+1)*sizeof(uint8_t *));
}
if (new_blocks == NULL) {
(blk+1)*sizeof(uint8_t *));
}
if (new_blocks == NULL) {
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"transaction_write: failed to allocate");
goto fail;
}
"transaction_write: failed to allocate");
goto fail;
}
if (tdb->transaction->blocks[blk] == NULL) {
tdb->transaction->blocks[blk] = (uint8_t *)calloc(getpagesize(), 1);
if (tdb->transaction->blocks[blk] == NULL) {
if (tdb->transaction->blocks[blk] == NULL) {
tdb->transaction->blocks[blk] = (uint8_t *)calloc(getpagesize(), 1);
if (tdb->transaction->blocks[blk] == NULL) {
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"transaction_write: failed to allocate");
goto fail;
}
"transaction_write: failed to allocate");
goto fail;
}
if (tdb->transaction->io_methods->tread(tdb, blk * getpagesize(),
tdb->transaction->blocks[blk],
len2) != 0) {
if (tdb->transaction->io_methods->tread(tdb, blk * getpagesize(),
tdb->transaction->blocks[blk],
len2) != 0) {
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"transaction_write: failed to"
" read old block: %s",
strerror(errno));
"transaction_write: failed to"
" read old block: %s",
strerror(errno));
}
tdb->ecode = TDB_ERR_IO;
if (!probe) {
}
tdb->ecode = TDB_ERR_IO;
if (!probe) {
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_oob len %lld beyond transaction size %lld",
(long long)len,
(long long)tdb->map_size);
"tdb_oob len %lld beyond transaction size %lld",
(long long)len,
(long long)tdb->map_size);
}
if (fsync(tdb->fd) != 0) {
}
if (fsync(tdb->fd) != 0) {
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_transaction: fsync failed: %s",
strerror(errno));
return -1;
"tdb_transaction: fsync failed: %s",
strerror(errno));
return -1;
tdb_off_t moffset = offset & ~(getpagesize()-1);
if (msync(moffset + (char *)tdb->map_ptr,
length + (offset - moffset), MS_SYNC) != 0) {
tdb_off_t moffset = offset & ~(getpagesize()-1);
if (msync(moffset + (char *)tdb->map_ptr,
length + (offset - moffset), MS_SYNC) != 0) {
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_transaction: msync failed: %s",
strerror(errno));
return -1;
"tdb_transaction: msync failed: %s",
strerror(errno));
return -1;
int i;
if (tdb->transaction == NULL) {
int i;
if (tdb->transaction == NULL) {
- tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_LOG_USE_ERROR,
"tdb_transaction_cancel: no transaction");
return;
}
"tdb_transaction_cancel: no transaction");
return;
}
&invalid, sizeof(invalid)) == -1 ||
transaction_sync(tdb, tdb->transaction->magic_offset,
sizeof(invalid)) == -1) {
&invalid, sizeof(invalid)) == -1 ||
transaction_sync(tdb, tdb->transaction->magic_offset,
sizeof(invalid)) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_cancel: failed to remove"
" recovery magic");
}
"tdb_transaction_cancel: failed to remove"
" recovery magic");
}
{
/* some sanity checks */
if (tdb->read_only || (tdb->flags & TDB_INTERNAL)) {
{
/* some sanity checks */
if (tdb->read_only || (tdb->flags & TDB_INTERNAL)) {
- tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_LOG_USE_ERROR,
"tdb_transaction_start: cannot start a transaction"
" on a read-only or internal db");
return -1;
"tdb_transaction_start: cannot start a transaction"
" on a read-only or internal db");
return -1;
/* cope with nested tdb_transaction_start() calls */
if (tdb->transaction != NULL) {
/* cope with nested tdb_transaction_start() calls */
if (tdb->transaction != NULL) {
- tdb_logerr(tdb, TDB_ERR_NESTING, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_NESTING, TDB_LOG_USE_ERROR,
"tdb_transaction_start:"
" already inside transaction");
return -1;
"tdb_transaction_start:"
" already inside transaction");
return -1;
/* the caller must not have any locks when starting a
transaction as otherwise we'll be screwed by lack
of nested locks in posix */
/* the caller must not have any locks when starting a
transaction as otherwise we'll be screwed by lack
of nested locks in posix */
- tdb_logerr(tdb, TDB_ERR_LOCK, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
"tdb_transaction_start: cannot start a transaction"
" with locks held");
return -1;
"tdb_transaction_start: cannot start a transaction"
" with locks held");
return -1;
tdb->transaction = (struct tdb_transaction *)
calloc(sizeof(struct tdb_transaction), 1);
if (tdb->transaction == NULL) {
tdb->transaction = (struct tdb_transaction *)
calloc(sizeof(struct tdb_transaction), 1);
if (tdb->transaction == NULL) {
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"tdb_transaction_start: cannot allocate");
return -1;
}
"tdb_transaction_start: cannot allocate");
return -1;
}
recovery_head = tdb_read_off(tdb, offsetof(struct tdb_header,recovery));
if (recovery_head == TDB_OFF_ERR) {
recovery_head = tdb_read_off(tdb, offsetof(struct tdb_header,recovery));
if (recovery_head == TDB_OFF_ERR) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_recovery_allocate:"
" failed to read recovery head");
return -1;
"tdb_recovery_allocate:"
" failed to read recovery head");
return -1;
if (recovery_head != 0) {
if (methods->tread(tdb, recovery_head, &rec, sizeof(rec))) {
if (recovery_head != 0) {
if (methods->tread(tdb, recovery_head, &rec, sizeof(rec))) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_recovery_allocate:"
" failed to read recovery record");
return -1;
"tdb_recovery_allocate:"
" failed to read recovery record");
return -1;
add_stat(tdb, frees, 1);
if (add_free_record(tdb, recovery_head,
sizeof(rec) + rec.max_len) != 0) {
add_stat(tdb, frees, 1);
if (add_free_record(tdb, recovery_head,
sizeof(rec) + rec.max_len) != 0) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_recovery_allocate:"
" failed to free previous recovery area");
return -1;
"tdb_recovery_allocate:"
" failed to free previous recovery area");
return -1;
sizeof(rec) + *recovery_max_size;
tdb->map_size = tdb->transaction->old_map_size;
if (methods->expand_file(tdb, addition) == -1) {
sizeof(rec) + *recovery_max_size;
tdb->map_size = tdb->transaction->old_map_size;
if (methods->expand_file(tdb, addition) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_recovery_allocate:"
" failed to create recovery area");
return -1;
"tdb_recovery_allocate:"
" failed to create recovery area");
return -1;
tdb_convert(tdb, &recovery_head, sizeof(recovery_head));
if (methods->twrite(tdb, offsetof(struct tdb_header, recovery),
&recovery_head, sizeof(tdb_off_t)) == -1) {
tdb_convert(tdb, &recovery_head, sizeof(recovery_head));
if (methods->twrite(tdb, offsetof(struct tdb_header, recovery),
&recovery_head, sizeof(tdb_off_t)) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_recovery_allocate:"
" failed to write recovery head");
return -1;
"tdb_recovery_allocate:"
" failed to write recovery head");
return -1;
data = (unsigned char *)malloc(recovery_size + sizeof(*rec));
if (data == NULL) {
data = (unsigned char *)malloc(recovery_size + sizeof(*rec));
if (data == NULL) {
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"transaction_setup_recovery: cannot allocate");
return -1;
}
"transaction_setup_recovery: cannot allocate");
return -1;
}
continue;
}
if (offset + length > tdb->map_size) {
continue;
}
if (offset + length > tdb->map_size) {
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_transaction_setup_recovery:"
" transaction data over new region boundary");
free(data);
"tdb_transaction_setup_recovery:"
" transaction data over new region boundary");
free(data);
/* write the recovery data to the recovery area */
if (methods->twrite(tdb, recovery_offset, data,
sizeof(*rec) + recovery_size) == -1) {
/* write the recovery data to the recovery area */
if (methods->twrite(tdb, recovery_offset, data,
sizeof(*rec) + recovery_size) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_setup_recovery:"
" failed to write recovery data");
free(data);
"tdb_transaction_setup_recovery:"
" failed to write recovery data");
free(data);
magic);
if (methods->twrite(tdb, *magic_offset, &magic, sizeof(magic)) == -1) {
magic);
if (methods->twrite(tdb, *magic_offset, &magic, sizeof(magic)) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_setup_recovery:"
" failed to write recovery magic");
return -1;
"tdb_transaction_setup_recovery:"
" failed to write recovery magic");
return -1;
const struct tdb_methods *methods;
if (tdb->transaction == NULL) {
const struct tdb_methods *methods;
if (tdb->transaction == NULL) {
- tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_LOG_USE_ERROR,
"tdb_transaction_prepare_commit: no transaction");
return -1;
}
if (tdb->transaction->prepared) {
_tdb_transaction_cancel(tdb);
"tdb_transaction_prepare_commit: no transaction");
return -1;
}
if (tdb->transaction->prepared) {
_tdb_transaction_cancel(tdb);
- tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_LOG_USE_ERROR,
"tdb_transaction_prepare_commit:"
" transaction already prepared");
return -1;
"tdb_transaction_prepare_commit:"
" transaction already prepared");
return -1;
if (tdb->transaction->transaction_error) {
_tdb_transaction_cancel(tdb);
if (tdb->transaction->transaction_error) {
_tdb_transaction_cancel(tdb);
- tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_LOG_ERROR,
"tdb_transaction_prepare_commit:"
" transaction error pending");
return -1;
"tdb_transaction_prepare_commit:"
" transaction error pending");
return -1;
/* upgrade the main transaction lock region to a write lock */
if (tdb_allrecord_upgrade(tdb) == -1) {
/* upgrade the main transaction lock region to a write lock */
if (tdb_allrecord_upgrade(tdb) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_prepare_commit:"
" failed to upgrade hash locks");
_tdb_transaction_cancel(tdb);
"tdb_transaction_prepare_commit:"
" failed to upgrade hash locks");
_tdb_transaction_cancel(tdb);
/* get the open lock - this prevents new users attaching to the database
during the commit */
if (tdb_lock_open(tdb, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK) == -1) {
/* get the open lock - this prevents new users attaching to the database
during the commit */
if (tdb_lock_open(tdb, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_prepare_commit:"
" failed to get open lock");
_tdb_transaction_cancel(tdb);
"tdb_transaction_prepare_commit:"
" failed to get open lock");
_tdb_transaction_cancel(tdb);
if (!(tdb->flags & TDB_NOSYNC)) {
/* write the recovery data to the end of the file */
if (transaction_setup_recovery(tdb, &tdb->transaction->magic_offset) == -1) {
if (!(tdb->flags & TDB_NOSYNC)) {
/* write the recovery data to the end of the file */
if (transaction_setup_recovery(tdb, &tdb->transaction->magic_offset) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_prepare_commit:"
" failed to setup recovery data");
_tdb_transaction_cancel(tdb);
"tdb_transaction_prepare_commit:"
" failed to setup recovery data");
_tdb_transaction_cancel(tdb);
/* Restore original map size for tdb_expand_file */
tdb->map_size = tdb->transaction->old_map_size;
if (methods->expand_file(tdb, add) == -1) {
/* Restore original map size for tdb_expand_file */
tdb->map_size = tdb->transaction->old_map_size;
if (methods->expand_file(tdb, add) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_prepare_commit:"
" expansion failed");
_tdb_transaction_cancel(tdb);
"tdb_transaction_prepare_commit:"
" expansion failed");
_tdb_transaction_cancel(tdb);
int i;
if (tdb->transaction == NULL) {
int i;
if (tdb->transaction == NULL) {
- tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_LOG_USE_ERROR,
"tdb_transaction_commit: no transaction");
return -1;
}
"tdb_transaction_commit: no transaction");
return -1;
}
if (tdb->transaction->transaction_error) {
tdb_transaction_cancel(tdb);
if (tdb->transaction->transaction_error) {
tdb_transaction_cancel(tdb);
- tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+ tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_transaction_commit:"
" transaction error pending");
return -1;
"tdb_transaction_commit:"
" transaction error pending");
return -1;
if (methods->twrite(tdb, offset, tdb->transaction->blocks[i],
length) == -1) {
if (methods->twrite(tdb, offset, tdb->transaction->blocks[i],
length) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_commit:"
" write failed during commit");
"tdb_transaction_commit:"
" write failed during commit");
/* find the recovery area */
recovery_head = tdb_read_off(tdb, offsetof(struct tdb_header,recovery));
if (recovery_head == TDB_OFF_ERR) {
/* find the recovery area */
recovery_head = tdb_read_off(tdb, offsetof(struct tdb_header,recovery));
if (recovery_head == TDB_OFF_ERR) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_recover:"
" failed to read recovery head");
return -1;
"tdb_transaction_recover:"
" failed to read recovery head");
return -1;
/* read the recovery record */
if (tdb_read_convert(tdb, recovery_head, &rec, sizeof(rec)) == -1) {
/* read the recovery record */
if (tdb_read_convert(tdb, recovery_head, &rec, sizeof(rec)) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_recover:"
" failed to read recovery record");
return -1;
"tdb_transaction_recover:"
" failed to read recovery record");
return -1;
- tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_transaction_recover:"
" attempt to recover read only database");
return -1;
"tdb_transaction_recover:"
" attempt to recover read only database");
return -1;
data = (unsigned char *)malloc(rec.len);
if (data == NULL) {
data = (unsigned char *)malloc(rec.len);
if (data == NULL) {
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"tdb_transaction_recover:"
" failed to allocate recovery data");
return -1;
"tdb_transaction_recover:"
" failed to allocate recovery data");
return -1;
/* read the full recovery data */
if (tdb->methods->tread(tdb, recovery_head + sizeof(rec), data,
rec.len) == -1) {
/* read the full recovery data */
if (tdb->methods->tread(tdb, recovery_head + sizeof(rec), data,
rec.len) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_recover:"
" failed to read recovery data");
return -1;
"tdb_transaction_recover:"
" failed to read recovery data");
return -1;
if (tdb->methods->twrite(tdb, ofs, p, len) == -1) {
free(data);
if (tdb->methods->twrite(tdb, ofs, p, len) == -1) {
free(data);
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_recover:"
" failed to recover %zu bytes at offset %zu",
(size_t)len, (size_t)ofs);
"tdb_transaction_recover:"
" failed to recover %zu bytes at offset %zu",
(size_t)len, (size_t)ofs);
free(data);
if (transaction_sync(tdb, 0, tdb->map_size) == -1) {
free(data);
if (transaction_sync(tdb, 0, tdb->map_size) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_recover: failed to sync recovery");
return -1;
}
"tdb_transaction_recover: failed to sync recovery");
return -1;
}
if (recovery_eof <= recovery_head) {
if (tdb_write_off(tdb, offsetof(struct tdb_header,recovery), 0)
== -1) {
if (recovery_eof <= recovery_head) {
if (tdb_write_off(tdb, offsetof(struct tdb_header,recovery), 0)
== -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_recover:"
" failed to remove recovery head");
return -1;
"tdb_transaction_recover:"
" failed to remove recovery head");
return -1;
recovery_head
+ offsetof(struct tdb_recovery_record, magic),
TDB_RECOVERY_INVALID_MAGIC) == -1) {
recovery_head
+ offsetof(struct tdb_recovery_record, magic),
TDB_RECOVERY_INVALID_MAGIC) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_recover:"
" failed to remove recovery magic");
return -1;
}
if (transaction_sync(tdb, 0, recovery_eof) == -1) {
"tdb_transaction_recover:"
" failed to remove recovery magic");
return -1;
}
if (transaction_sync(tdb, 0, recovery_eof) == -1) {
- tdb_logerr(tdb, tdb->ecode, TDB_DEBUG_FATAL,
+ tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
"tdb_transaction_recover: failed to sync2 recovery");
return -1;
}
"tdb_transaction_recover: failed to sync2 recovery");
return -1;
}
- tdb_logerr(tdb, TDB_SUCCESS, TDB_DEBUG_TRACE,
+ tdb_logerr(tdb, TDB_SUCCESS, TDB_LOG_WARNING,
"tdb_transaction_recover: recovered %zu byte database",
(size_t)recovery_eof);
"tdb_transaction_recover: recovered %zu byte database",
(size_t)recovery_eof);