X-Git-Url: http://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Ftdb2%2Ftdb1_tdb.c;h=a50303c33ce4e6333e823ceea2cceff6788e60f7;hp=93d2132f3847f453319054becf32fb4770672619;hb=3352e4e947777d4a90a2dd4f3037e1e494231b25;hpb=b929638e3cfe629285af3ecd0813e03eaeaa1133 diff --git a/ccan/tdb2/tdb1_tdb.c b/ccan/tdb2/tdb1_tdb.c index 93d2132f..a50303c3 100644 --- a/ccan/tdb2/tdb1_tdb.c +++ b/ccan/tdb2/tdb1_tdb.c @@ -26,18 +26,19 @@ */ #include "tdb1_private.h" +#include -_PUBLIC_ TDB1_DATA tdb1_null; +TDB_DATA tdb1_null; /* non-blocking increment of the tdb sequence number if the tdb has been opened using - the TDB1_SEQNUM flag + the TDB_SEQNUM flag */ -_PUBLIC_ void tdb1_increment_seqnum_nonblock(struct tdb1_context *tdb) +void tdb1_increment_seqnum_nonblock(struct tdb_context *tdb) { tdb1_off_t seqnum=0; - if (!(tdb->flags & TDB1_SEQNUM)) { + if (!(tdb->flags & TDB_SEQNUM)) { return; } @@ -51,32 +52,32 @@ _PUBLIC_ void tdb1_increment_seqnum_nonblock(struct tdb1_context *tdb) /* increment the tdb sequence number if the tdb has been opened using - the TDB1_SEQNUM flag + the TDB_SEQNUM flag */ -static void tdb1_increment_seqnum(struct tdb1_context *tdb) +static void tdb1_increment_seqnum(struct tdb_context *tdb) { - if (!(tdb->flags & TDB1_SEQNUM)) { + if (!(tdb->flags & TDB_SEQNUM)) { return; } if (tdb1_nest_lock(tdb, TDB1_SEQNUM_OFS, F_WRLCK, - TDB1_LOCK_WAIT|TDB1_LOCK_PROBE) != 0) { + TDB_LOCK_WAIT|TDB_LOCK_PROBE) != 0) { return; } tdb1_increment_seqnum_nonblock(tdb); - tdb1_nest_unlock(tdb, TDB1_SEQNUM_OFS, F_WRLCK, false); + tdb1_nest_unlock(tdb, TDB1_SEQNUM_OFS, F_WRLCK); } -static int tdb1_key_compare(TDB1_DATA key, TDB1_DATA data, void *private_data) +static int tdb1_key_compare(TDB_DATA key, TDB_DATA data, void *private_data) { return memcmp(data.dptr, key.dptr, data.dsize); } /* Returns 0 on fail. On success, return offset of record, and fills in rec */ -static tdb1_off_t tdb1_find(struct tdb1_context *tdb, TDB1_DATA key, uint32_t hash, +static tdb1_off_t tdb1_find(struct tdb_context *tdb, TDB_DATA key, uint32_t hash, struct tdb1_record *r) { tdb1_off_t rec_ptr; @@ -99,18 +100,19 @@ static tdb1_off_t tdb1_find(struct tdb1_context *tdb, TDB1_DATA key, uint32_t ha } /* detect tight infinite loop */ if (rec_ptr == r->next) { - tdb->ecode = TDB1_ERR_CORRUPT; - TDB1_LOG((tdb, TDB1_DEBUG_FATAL, "tdb1_find: loop detected.\n")); + tdb->last_error = tdb_logerr(tdb, TDB_ERR_CORRUPT, + TDB_LOG_ERROR, + "tdb1_find: loop detected."); return 0; } rec_ptr = r->next; } - tdb->ecode = TDB1_ERR_NOEXIST; + tdb->last_error = TDB_ERR_NOEXIST; return 0; } /* As tdb1_find, but if you succeed, keep the lock */ -tdb1_off_t tdb1_find_lock_hash(struct tdb1_context *tdb, TDB1_DATA key, uint32_t hash, int locktype, +tdb1_off_t tdb1_find_lock_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash, int locktype, struct tdb1_record *rec) { uint32_t rec_ptr; @@ -122,13 +124,13 @@ tdb1_off_t tdb1_find_lock_hash(struct tdb1_context *tdb, TDB1_DATA key, uint32_t return rec_ptr; } -static TDB1_DATA _tdb1_fetch(struct tdb1_context *tdb, TDB1_DATA key); +static TDB_DATA _tdb1_fetch(struct tdb_context *tdb, TDB_DATA key); /* update an entry in place - this only works if the new data size is <= the old data size and the key exists. on failure return -1. 
*/ -static int tdb1_update_hash(struct tdb1_context *tdb, TDB1_DATA key, uint32_t hash, TDB1_DATA dbuf) +static int tdb1_update_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash, TDB_DATA dbuf) { struct tdb1_record rec; tdb1_off_t rec_ptr; @@ -142,7 +144,7 @@ static int tdb1_update_hash(struct tdb1_context *tdb, TDB1_DATA key, uint32_t ha if (rec.key_len == key.dsize && rec.data_len == dbuf.dsize && rec.full_hash == hash) { - TDB1_DATA data = _tdb1_fetch(tdb, key); + TDB_DATA data = _tdb1_fetch(tdb, key); if (data.dsize == dbuf.dsize && memcmp(data.dptr, dbuf.dptr, data.dsize) == 0) { if (data.dptr) { @@ -157,11 +159,11 @@ static int tdb1_update_hash(struct tdb1_context *tdb, TDB1_DATA key, uint32_t ha /* must be long enough key, data and tailer */ if (rec.rec_len < key.dsize + dbuf.dsize + sizeof(tdb1_off_t)) { - tdb->ecode = TDB1_SUCCESS; /* Not really an error */ + tdb->last_error = TDB_SUCCESS; /* Not really an error */ return -1; } - if (tdb->methods->tdb1_write(tdb, rec_ptr + sizeof(rec) + rec.key_len, + if (tdb->tdb1.io->tdb1_write(tdb, rec_ptr + sizeof(rec) + rec.key_len, dbuf.dptr, dbuf.dsize) == -1) return -1; @@ -176,19 +178,19 @@ static int tdb1_update_hash(struct tdb1_context *tdb, TDB1_DATA key, uint32_t ha /* find an entry in the database given a key */ /* If an entry doesn't exist tdb1_err will be set to - * TDB1_ERR_NOEXIST. If a key has no data attached - * then the TDB1_DATA will have zero length but + * TDB_ERR_NOEXIST. If a key has no data attached + * then the TDB_DATA will have zero length but * a non-zero pointer */ -static TDB1_DATA _tdb1_fetch(struct tdb1_context *tdb, TDB1_DATA key) +static TDB_DATA _tdb1_fetch(struct tdb_context *tdb, TDB_DATA key) { tdb1_off_t rec_ptr; struct tdb1_record rec; - TDB1_DATA ret; + TDB_DATA ret; uint32_t hash; /* find which hash bucket it is in */ - hash = tdb->hash_fn(&key); + hash = tdb_hash(tdb, key.dptr, key.dsize); if (!(rec_ptr = tdb1_find_lock_hash(tdb,key,hash,F_RDLCK,&rec))) return tdb1_null; @@ -199,48 +201,31 @@ static TDB1_DATA _tdb1_fetch(struct tdb1_context *tdb, TDB1_DATA key) return ret; } -_PUBLIC_ TDB1_DATA tdb1_fetch(struct tdb1_context *tdb, TDB1_DATA key) +enum TDB_ERROR tdb1_fetch(struct tdb_context *tdb, TDB_DATA key, TDB_DATA *data) { - TDB1_DATA ret = _tdb1_fetch(tdb, key); - - return ret; + *data = _tdb1_fetch(tdb, key); + if (data->dptr == NULL) + return tdb->last_error; + return TDB_SUCCESS; } -/* - * Find an entry in the database and hand the record's data to a parsing - * function. The parsing function is executed under the chain read lock, so it - * should be fast and should not block on other syscalls. - * - * DON'T CALL OTHER TDB CALLS FROM THE PARSER, THIS MIGHT LEAD TO SEGFAULTS. - * - * For mmapped tdb's that do not have a transaction open it points the parsing - * function directly at the mmap area, it avoids the malloc/memcpy in this - * case. If a transaction is open or no mmap is available, it has to do - * malloc/read/parse/free. - * - * This is interesting for all readers of potentially large data structures in - * the tdb records, ldb indexes being one example. - * - * Return -1 if the record was not found. 
- */ - -_PUBLIC_ int tdb1_parse_record(struct tdb1_context *tdb, TDB1_DATA key, - int (*parser)(TDB1_DATA key, TDB1_DATA data, - void *private_data), - void *private_data) +enum TDB_ERROR tdb1_parse_record(struct tdb_context *tdb, TDB_DATA key, + enum TDB_ERROR (*parser)(TDB_DATA key, + TDB_DATA data, + void *private_data), + void *private_data) { tdb1_off_t rec_ptr; struct tdb1_record rec; - int ret; + enum TDB_ERROR ret; uint32_t hash; /* find which hash bucket it is in */ - hash = tdb->hash_fn(&key); + hash = tdb_hash(tdb, key.dptr, key.dsize); if (!(rec_ptr = tdb1_find_lock_hash(tdb,key,hash,F_RDLCK,&rec))) { /* record not found */ - tdb->ecode = TDB1_ERR_NOEXIST; - return -1; + return TDB_ERR_NOEXIST; } ret = tdb1_parse_data(tdb, key, rec_ptr + sizeof(rec) + rec.key_len, @@ -257,7 +242,7 @@ _PUBLIC_ int tdb1_parse_record(struct tdb1_context *tdb, TDB1_DATA key, this doesn't match the conventions in the rest of this module, but is compatible with gdbm */ -static int tdb1_exists_hash(struct tdb1_context *tdb, TDB1_DATA key, uint32_t hash) +static int tdb1_exists_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash) { struct tdb1_record rec; @@ -267,24 +252,25 @@ static int tdb1_exists_hash(struct tdb1_context *tdb, TDB1_DATA key, uint32_t ha return 1; } -_PUBLIC_ int tdb1_exists(struct tdb1_context *tdb, TDB1_DATA key) +int tdb1_exists(struct tdb_context *tdb, TDB_DATA key) { - uint32_t hash = tdb->hash_fn(&key); + uint32_t hash = tdb_hash(tdb, key.dptr, key.dsize); int ret; + assert(tdb->flags & TDB_VERSION1); ret = tdb1_exists_hash(tdb, key, hash); return ret; } /* actually delete an entry in the database given the offset */ -int tdb1_do_delete(struct tdb1_context *tdb, tdb1_off_t rec_ptr, struct tdb1_record *rec) +int tdb1_do_delete(struct tdb_context *tdb, tdb1_off_t rec_ptr, struct tdb1_record *rec) { tdb1_off_t last_ptr, i; struct tdb1_record lastrec; - if (tdb->read_only || tdb->traverse_read) return -1; + if ((tdb->flags & TDB_RDONLY) || tdb->tdb1.traverse_read) return -1; - if (((tdb->traverse_write != 0) && (!TDB1_DEAD(rec))) || + if (((tdb->tdb1.traverse_write != 0) && (!TDB1_DEAD(rec))) || tdb1_write_lock_record(tdb, rec_ptr) == -1) { /* Someone traversing here: mark it as dead */ rec->magic = TDB1_DEAD_MAGIC; @@ -312,7 +298,7 @@ int tdb1_do_delete(struct tdb1_context *tdb, tdb1_off_t rec_ptr, struct tdb1_rec return 0; } -static int tdb1_count_dead(struct tdb1_context *tdb, uint32_t hash) +static int tdb1_count_dead(struct tdb_context *tdb, uint32_t hash) { int res = 0; tdb1_off_t rec_ptr; @@ -337,7 +323,7 @@ static int tdb1_count_dead(struct tdb1_context *tdb, uint32_t hash) /* * Purge all DEAD records from a hash chain */ -static int tdb1_purge_dead(struct tdb1_context *tdb, uint32_t hash) +static int tdb1_purge_dead(struct tdb_context *tdb, uint32_t hash) { int res = -1; struct tdb1_record rec; @@ -373,13 +359,13 @@ static int tdb1_purge_dead(struct tdb1_context *tdb, uint32_t hash) } /* delete an entry in the database given a key */ -static int tdb1_delete_hash(struct tdb1_context *tdb, TDB1_DATA key, uint32_t hash) +static int tdb1_delete_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash) { tdb1_off_t rec_ptr; struct tdb1_record rec; int ret; - if (tdb->max_dead_records != 0) { + if (tdb->tdb1.max_dead_records != 0) { /* * Allow for some dead records per hash chain, mainly for @@ -389,7 +375,7 @@ static int tdb1_delete_hash(struct tdb1_context *tdb, TDB1_DATA key, uint32_t ha if (tdb1_lock(tdb, TDB1_BUCKET(hash), F_WRLCK) == -1) return -1; - if 
(tdb1_count_dead(tdb, hash) >= tdb->max_dead_records) { + if (tdb1_count_dead(tdb, hash) >= tdb->tdb1.max_dead_records) { /* * Don't let the per-chain freelist grow too large, * delete all existing dead records @@ -421,15 +407,17 @@ static int tdb1_delete_hash(struct tdb1_context *tdb, TDB1_DATA key, uint32_t ha } if (tdb1_unlock(tdb, TDB1_BUCKET(rec.full_hash), F_WRLCK) != 0) - TDB1_LOG((tdb, TDB1_DEBUG_WARNING, "tdb1_delete: WARNING tdb1_unlock failed!\n")); + tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR, + "tdb1_delete: WARNING tdb1_unlock failed!"); return ret; } -_PUBLIC_ int tdb1_delete(struct tdb1_context *tdb, TDB1_DATA key) +int tdb1_delete(struct tdb_context *tdb, TDB_DATA key) { - uint32_t hash = tdb->hash_fn(&key); + uint32_t hash = tdb_hash(tdb, key.dptr, key.dsize); int ret; + assert(tdb->flags & TDB_VERSION1); ret = tdb1_delete_hash(tdb, key, hash); return ret; } @@ -437,7 +425,7 @@ _PUBLIC_ int tdb1_delete(struct tdb1_context *tdb, TDB1_DATA key) /* * See if we have a dead record around with enough space */ -static tdb1_off_t tdb1_find_dead(struct tdb1_context *tdb, uint32_t hash, +static tdb1_off_t tdb1_find_dead(struct tdb_context *tdb, uint32_t hash, struct tdb1_record *r, tdb1_len_t length) { tdb1_off_t rec_ptr; @@ -463,8 +451,8 @@ static tdb1_off_t tdb1_find_dead(struct tdb1_context *tdb, uint32_t hash, return 0; } -static int _tdb1_store(struct tdb1_context *tdb, TDB1_DATA key, - TDB1_DATA dbuf, int flag, uint32_t hash) +static int _tdb1_store(struct tdb_context *tdb, TDB_DATA key, + TDB_DATA dbuf, int flag, uint32_t hash) { struct tdb1_record rec; tdb1_off_t rec_ptr; @@ -472,9 +460,9 @@ static int _tdb1_store(struct tdb1_context *tdb, TDB1_DATA key, int ret = -1; /* check for it existing, on insert. */ - if (flag == TDB1_INSERT) { + if (flag == TDB_INSERT) { if (tdb1_exists_hash(tdb, key, hash)) { - tdb->ecode = TDB1_ERR_EXISTS; + tdb->last_error = TDB_ERR_EXISTS; goto fail; } } else { @@ -482,27 +470,27 @@ static int _tdb1_store(struct tdb1_context *tdb, TDB1_DATA key, if (tdb1_update_hash(tdb, key, hash, dbuf) == 0) { goto done; } - if (tdb->ecode == TDB1_ERR_NOEXIST && - flag == TDB1_MODIFY) { + if (tdb->last_error == TDB_ERR_NOEXIST && + flag == TDB_MODIFY) { /* if the record doesn't exist and we are in TDB1_MODIFY mode then we should fail the store */ goto fail; } } /* reset the error code potentially set by the tdb1_update() */ - tdb->ecode = TDB1_SUCCESS; + tdb->last_error = TDB_SUCCESS; /* delete any existing record - if it doesn't exist we don't care. Doing this first reduces fragmentation, and avoids coalescing with `allocated' block before it's updated. */ - if (flag != TDB1_INSERT) + if (flag != TDB_INSERT) tdb1_delete_hash(tdb, key, hash); /* Copy key+value *before* allocating free space in case malloc fails and we are left with a dead spot in the tdb. */ if (!(p = (char *)malloc(key.dsize + dbuf.dsize))) { - tdb->ecode = TDB1_ERR_OOM; + tdb->last_error = TDB_ERR_OOM; goto fail; } @@ -510,7 +498,7 @@ static int _tdb1_store(struct tdb1_context *tdb, TDB1_DATA key, if (dbuf.dsize) memcpy(p+key.dsize, dbuf.dptr, dbuf.dsize); - if (tdb->max_dead_records != 0) { + if (tdb->tdb1.max_dead_records != 0) { /* * Allow for some dead records per hash chain, look if we can * find one that can hold the new record. 
We need enough space @@ -527,7 +515,7 @@ static int _tdb1_store(struct tdb1_context *tdb, TDB1_DATA key, rec.full_hash = hash; rec.magic = TDB1_MAGIC; if (tdb1_rec_write(tdb, rec_ptr, &rec) == -1 - || tdb->methods->tdb1_write( + || tdb->tdb1.io->tdb1_write( tdb, rec_ptr + sizeof(rec), p, key.dsize + dbuf.dsize) == -1) { goto fail; @@ -546,7 +534,7 @@ static int _tdb1_store(struct tdb1_context *tdb, TDB1_DATA key, goto fail; } - if ((tdb->max_dead_records != 0) + if ((tdb->tdb1.max_dead_records != 0) && (tdb1_purge_dead(tdb, hash) == -1)) { tdb1_unlock(tdb, -1, F_WRLCK); goto fail; @@ -572,7 +560,7 @@ static int _tdb1_store(struct tdb1_context *tdb, TDB1_DATA key, /* write out and point the top of the hash chain at it */ if (tdb1_rec_write(tdb, rec_ptr, &rec) == -1 - || tdb->methods->tdb1_write(tdb, rec_ptr+sizeof(rec), p, key.dsize+dbuf.dsize)==-1 + || tdb->tdb1.io->tdb1_write(tdb, rec_ptr+sizeof(rec), p, key.dsize+dbuf.dsize)==-1 || tdb1_ofs_write(tdb, TDB1_HASH_TOP(hash), &rec_ptr) == -1) { /* Need to tdb1_unallocate() here */ goto fail; @@ -594,18 +582,20 @@ static int _tdb1_store(struct tdb1_context *tdb, TDB1_DATA key, return 0 on success, -1 on failure */ -_PUBLIC_ int tdb1_store(struct tdb1_context *tdb, TDB1_DATA key, TDB1_DATA dbuf, int flag) +int tdb1_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag) { uint32_t hash; int ret; - if (tdb->read_only || tdb->traverse_read) { - tdb->ecode = TDB1_ERR_RDONLY; + assert(tdb->flags & TDB_VERSION1); + + if ((tdb->flags & TDB_RDONLY) || tdb->tdb1.traverse_read) { + tdb->last_error = TDB_ERR_RDONLY; return -1; } /* find which hash bucket it is in */ - hash = tdb->hash_fn(&key); + hash = tdb_hash(tdb, key.dptr, key.dsize); if (tdb1_lock(tdb, TDB1_BUCKET(hash), F_WRLCK) == -1) return -1; @@ -615,14 +605,16 @@ _PUBLIC_ int tdb1_store(struct tdb1_context *tdb, TDB1_DATA key, TDB1_DATA dbuf, } /* Append to an entry. Create if not exist. */ -_PUBLIC_ int tdb1_append(struct tdb1_context *tdb, TDB1_DATA key, TDB1_DATA new_dbuf) +int tdb1_append(struct tdb_context *tdb, TDB_DATA key, TDB_DATA new_dbuf) { uint32_t hash; - TDB1_DATA dbuf; + TDB_DATA dbuf; int ret = -1; + assert(tdb->flags & TDB_VERSION1); + /* find which hash bucket it is in */ - hash = tdb->hash_fn(&key); + hash = tdb_hash(tdb, key.dptr, key.dsize); if (tdb1_lock(tdb, TDB1_BUCKET(hash), F_WRLCK) == -1) return -1; @@ -645,7 +637,7 @@ _PUBLIC_ int tdb1_append(struct tdb1_context *tdb, TDB1_DATA key, TDB1_DATA new_ } if (dbuf.dptr == NULL) { - tdb->ecode = TDB1_ERR_OOM; + tdb->last_error = TDB_ERR_OOM; goto failed; } @@ -661,16 +653,6 @@ failed: } -/* - return the current logging function - useful for external tdb routines that wish to log tdb errors -*/ -_PUBLIC_ tdb1_log_func tdb1_log_fn(struct tdb1_context *tdb) -{ - return tdb->log.log_fn; -} - - /* get the tdb sequence number. Only makes sense if the writers opened with TDB1_SEQNUM set. Note that this sequence number will wrap quite @@ -681,7 +663,7 @@ _PUBLIC_ tdb1_log_func tdb1_log_fn(struct tdb1_context *tdb) The aim of this sequence number is to allow for a very lightweight test of a possible tdb change. */ -_PUBLIC_ int tdb1_get_seqnum(struct tdb1_context *tdb) +int tdb1_get_seqnum(struct tdb_context *tdb) { tdb1_off_t seqnum=0; @@ -689,31 +671,29 @@ _PUBLIC_ int tdb1_get_seqnum(struct tdb1_context *tdb) return seqnum; } -_PUBLIC_ int tdb1_hash_size(struct tdb1_context *tdb) -{ - return tdb->header.hash_size; -} - /* add a region of the file to the freelist. 
Length is the size of the region in bytes, which includes the free list header that needs to be added */ -static int tdb1_free_region(struct tdb1_context *tdb, tdb1_off_t offset, ssize_t length) +static int tdb1_free_region(struct tdb_context *tdb, tdb1_off_t offset, ssize_t length) { struct tdb1_record rec; if (length <= sizeof(rec)) { /* the region is not worth adding */ return 0; } - if (length + offset > tdb->map_size) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL,"tdb1_free_region: adding region beyond end of file\n")); + if (length + offset > tdb->file->map_size) { + tdb->last_error = tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR, + "tdb1_free_region: adding region beyond" + " end of file"); return -1; } memset(&rec,'\0',sizeof(rec)); rec.rec_len = length - sizeof(rec); if (tdb1_free(tdb, offset, &rec) == -1) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL,"tdb1_free_region: failed to add free record\n")); + tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR, + "tdb1_free_region: failed to add free record"); return -1; } return 0; @@ -726,7 +706,7 @@ static int tdb1_free_region(struct tdb1_context *tdb, tdb1_off_t offset, ssize_t This code carefully steps around the recovery area, leaving it alone */ -_PUBLIC_ int tdb1_wipe_all(struct tdb1_context *tdb) +int tdb1_wipe_all(struct tdb_context *tdb) { int i; tdb1_off_t offset = 0; @@ -734,7 +714,7 @@ _PUBLIC_ int tdb1_wipe_all(struct tdb1_context *tdb) tdb1_off_t recovery_head; tdb1_len_t recovery_size = 0; - if (tdb1_lockall(tdb) != 0) { + if (tdb_lockall(tdb) != TDB_SUCCESS) { return -1; } @@ -744,30 +724,34 @@ _PUBLIC_ int tdb1_wipe_all(struct tdb1_context *tdb) tdb1_wipe_all() in a transaction will increase the size of the tdb by the size of the recovery area */ if (tdb1_ofs_read(tdb, TDB1_RECOVERY_HEAD, &recovery_head) == -1) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL, "tdb1_wipe_all: failed to read recovery head\n")); + tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR, + "tdb1_wipe_all: failed to read recovery head"); goto failed; } if (recovery_head != 0) { struct tdb1_record rec; - if (tdb->methods->tdb1_read(tdb, recovery_head, &rec, sizeof(rec), TDB1_DOCONV()) == -1) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL, "tdb1_wipe_all: failed to read recovery record\n")); + if (tdb->tdb1.io->tdb1_read(tdb, recovery_head, &rec, sizeof(rec), TDB1_DOCONV()) == -1) { + tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR, + "tdb1_wipe_all: failed to read recovery record"); return -1; } recovery_size = rec.rec_len + sizeof(rec); } /* wipe the hashes */ - for (i=0;iheader.hash_size;i++) { + for (i=0;itdb1.header.hash_size;i++) { if (tdb1_ofs_write(tdb, TDB1_HASH_TOP(i), &offset) == -1) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL,"tdb1_wipe_all: failed to write hash %d\n", i)); + tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR, + "tdb1_wipe_all: failed to write hash %d", i); goto failed; } } /* wipe the freelist */ if (tdb1_ofs_write(tdb, TDB1_FREELIST_TOP, &offset) == -1) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL,"tdb1_wipe_all: failed to write freelist\n")); + tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR, + "tdb1_wipe_all: failed to write freelist"); goto failed; } @@ -775,8 +759,8 @@ _PUBLIC_ int tdb1_wipe_all(struct tdb1_context *tdb) for the recovery area */ if (recovery_size == 0) { /* the simple case - the whole file can be used as a freelist */ - data_len = (tdb->map_size - TDB1_DATA_START(tdb->header.hash_size)); - if (tdb1_free_region(tdb, TDB1_DATA_START(tdb->header.hash_size), data_len) != 0) { + data_len = (tdb->file->map_size - TDB1_DATA_START(tdb->tdb1.header.hash_size)); + if 
(tdb1_free_region(tdb, TDB1_DATA_START(tdb->tdb1.header.hash_size), data_len) != 0) { goto failed; } } else { @@ -788,42 +772,38 @@ _PUBLIC_ int tdb1_wipe_all(struct tdb1_context *tdb) move the recovery area or we risk subtle data corruption */ - data_len = (recovery_head - TDB1_DATA_START(tdb->header.hash_size)); - if (tdb1_free_region(tdb, TDB1_DATA_START(tdb->header.hash_size), data_len) != 0) { + data_len = (recovery_head - TDB1_DATA_START(tdb->tdb1.header.hash_size)); + if (tdb1_free_region(tdb, TDB1_DATA_START(tdb->tdb1.header.hash_size), data_len) != 0) { goto failed; } /* and the 2nd free list entry after the recovery area - if any */ - data_len = tdb->map_size - (recovery_head+recovery_size); + data_len = tdb->file->map_size - (recovery_head+recovery_size); if (tdb1_free_region(tdb, recovery_head+recovery_size, data_len) != 0) { goto failed; } } - if (tdb1_unlockall(tdb) != 0) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL,"tdb1_wipe_all: failed to unlock\n")); - goto failed; - } - + tdb_unlockall(tdb); return 0; failed: - tdb1_unlockall(tdb); + tdb_unlockall(tdb); return -1; } struct traverse_state { - bool error; - struct tdb1_context *dest_db; + enum TDB_ERROR error; + struct tdb_context *dest_db; }; /* traverse function for repacking */ -static int repack_traverse(struct tdb1_context *tdb, TDB1_DATA key, TDB1_DATA data, void *private_data) +static int repack_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *private_data) { struct traverse_state *state = (struct traverse_state *)private_data; - if (tdb1_store(state->dest_db, key, data, TDB1_INSERT) != 0) { - state->error = true; + if (tdb1_store(state->dest_db, key, data, TDB_INSERT) != 0) { + state->error = state->dest_db->last_error; return -1; } return 0; @@ -832,68 +812,81 @@ static int repack_traverse(struct tdb1_context *tdb, TDB1_DATA key, TDB1_DATA da /* repack a tdb */ -_PUBLIC_ int tdb1_repack(struct tdb1_context *tdb) +int tdb1_repack(struct tdb_context *tdb) { - struct tdb1_context *tmp_db; + struct tdb_context *tmp_db; struct traverse_state state; + union tdb_attribute hsize; + + hsize.base.attr = TDB_ATTRIBUTE_TDB1_HASHSIZE; + hsize.base.next = NULL; + hsize.tdb1_hashsize.hsize = tdb->tdb1.header.hash_size; if (tdb1_transaction_start(tdb) != 0) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL, __location__ " Failed to start transaction\n")); + tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR, + __location__ " Failed to start transaction"); return -1; } - tmp_db = tdb1_open("tmpdb", tdb1_hash_size(tdb), TDB1_INTERNAL, O_RDWR|O_CREAT, 0); + tmp_db = tdb_open("tmpdb", TDB_INTERNAL, O_RDWR|O_CREAT, 0, &hsize); if (tmp_db == NULL) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL, __location__ " Failed to create tmp_db\n")); + tdb->last_error = tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR, + __location__ " Failed to create tmp_db"); tdb1_transaction_cancel(tdb); return -1; } - state.error = false; + state.error = TDB_SUCCESS; state.dest_db = tmp_db; - if (tdb1_traverse_read(tdb, repack_traverse, &state) == -1) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL, __location__ " Failed to traverse copying out\n")); + if (tdb1_traverse(tdb, repack_traverse, &state) == -1) { + tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR, + __location__ " Failed to traverse copying out"); tdb1_transaction_cancel(tdb); - tdb1_close(tmp_db); + tdb_close(tmp_db); return -1; } - if (state.error) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL, __location__ " Error during traversal\n")); + if (state.error != TDB_SUCCESS) { + tdb->last_error = tdb_logerr(tdb, state.error, TDB_LOG_ERROR, + 
__location__ " Error during traversal"); tdb1_transaction_cancel(tdb); - tdb1_close(tmp_db); + tdb_close(tmp_db); return -1; } if (tdb1_wipe_all(tdb) != 0) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL, __location__ " Failed to wipe database\n")); + tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR, + __location__ " Failed to wipe database\n"); tdb1_transaction_cancel(tdb); - tdb1_close(tmp_db); + tdb_close(tmp_db); return -1; } - state.error = false; + state.error = TDB_SUCCESS; state.dest_db = tdb; - if (tdb1_traverse_read(tmp_db, repack_traverse, &state) == -1) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL, __location__ " Failed to traverse copying back\n")); + if (tdb1_traverse(tmp_db, repack_traverse, &state) == -1) { + tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR, + __location__ " Failed to traverse copying back"); tdb1_transaction_cancel(tdb); - tdb1_close(tmp_db); + tdb_close(tmp_db); return -1; } if (state.error) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL, __location__ " Error during second traversal\n")); + tdb->last_error = tdb_logerr(tdb, state.error, TDB_LOG_ERROR, + __location__ " Error during second traversal"); tdb1_transaction_cancel(tdb); - tdb1_close(tmp_db); + tdb_close(tmp_db); return -1; } - tdb1_close(tmp_db); + tdb_close(tmp_db); if (tdb1_transaction_commit(tdb) != 0) { - TDB1_LOG((tdb, TDB1_DEBUG_FATAL, __location__ " Failed to commit\n")); + tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR, + __location__ " Failed to commit"); return -1; }