X-Git-Url: http://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Ftdb2%2Ftdb1_tdb.c;h=6f6f080da2298497127fd7ae602472f30ed8908d;hp=c69c61d0c0663f85e9eac8522a0d51c6080c0db5;hb=1be090a2d749713cfd0c4584cafb97bffd716189;hpb=60210a73ec08a7b34ba637ad19e6749cf6dc1952

diff --git a/ccan/tdb2/tdb1_tdb.c b/ccan/tdb2/tdb1_tdb.c
index c69c61d0..6f6f080d 100644
--- a/ccan/tdb2/tdb1_tdb.c
+++ b/ccan/tdb2/tdb1_tdb.c
@@ -26,14 +26,13 @@
  */
 
 #include "tdb1_private.h"
-
-TDB_DATA tdb1_null;
+#include <assert.h>
 
 /*
   non-blocking increment of the tdb sequence number if the tdb has been opened using
   the TDB_SEQNUM flag
 */
-void tdb1_increment_seqnum_nonblock(struct tdb1_context *tdb)
+void tdb1_increment_seqnum_nonblock(struct tdb_context *tdb)
 {
 	tdb1_off_t seqnum=0;
 
@@ -53,7 +52,7 @@ void tdb1_increment_seqnum_nonblock(struct tdb1_context *tdb)
   increment the tdb sequence number if the tdb has been opened using
   the TDB_SEQNUM flag
 */
-static void tdb1_increment_seqnum(struct tdb1_context *tdb)
+static void tdb1_increment_seqnum(struct tdb_context *tdb)
 {
 	if (!(tdb->flags & TDB_SEQNUM)) {
 		return;
@@ -69,14 +68,18 @@ static void tdb1_increment_seqnum(struct tdb1_context *tdb)
 	tdb1_nest_unlock(tdb, TDB1_SEQNUM_OFS, F_WRLCK);
 }
 
-static int tdb1_key_compare(TDB_DATA key, TDB_DATA data, void *private_data)
+static enum TDB_ERROR tdb1_key_compare(TDB_DATA key, TDB_DATA data,
+				       void *matches_)
 {
-	return memcmp(data.dptr, key.dptr, data.dsize);
+	bool *matches = matches_;
+	*matches = (memcmp(data.dptr, key.dptr, data.dsize) == 0);
+	return TDB_SUCCESS;
 }
 
-/* Returns 0 on fail. On success, return offset of record, and fills
-   in rec */
-static tdb1_off_t tdb1_find(struct tdb1_context *tdb, TDB_DATA key, uint32_t hash,
+/* Returns 0 on fail; last_error will be TDB_ERR_NOEXIST if it simply
+ * wasn't there, otherwise a real error.
+ * On success, return offset of record, and fills in rec */ +static tdb1_off_t tdb1_find(struct tdb_context *tdb, TDB_DATA key, uint32_t hash, struct tdb1_record *r) { tdb1_off_t rec_ptr; @@ -90,12 +93,30 @@ static tdb1_off_t tdb1_find(struct tdb1_context *tdb, TDB_DATA key, uint32_t has if (tdb1_rec_read(tdb, rec_ptr, r) == -1) return 0; - if (!TDB1_DEAD(r) && hash==r->full_hash - && key.dsize==r->key_len - && tdb1_parse_data(tdb, key, rec_ptr + sizeof(*r), - r->key_len, tdb1_key_compare, - NULL) == 0) { - return rec_ptr; + tdb->stats.compares++; + if (TDB1_DEAD(r)) { + tdb->stats.compare_wrong_bucket++; + } else if (key.dsize != r->key_len) { + tdb->stats.compare_wrong_keylen++; + } else if (hash != r->full_hash) { + tdb->stats.compare_wrong_rechash++; + } else { + enum TDB_ERROR ecode; + bool matches; + ecode = tdb1_parse_data(tdb, key, rec_ptr + sizeof(*r), + r->key_len, tdb1_key_compare, + &matches); + + if (ecode != TDB_SUCCESS) { + tdb->last_error = ecode; + return 0; + } + + if (!matches) { + tdb->stats.compare_wrong_keycmp++; + } else { + return rec_ptr; + } } /* detect tight infinite loop */ if (rec_ptr == r->next) { @@ -111,7 +132,7 @@ static tdb1_off_t tdb1_find(struct tdb1_context *tdb, TDB_DATA key, uint32_t has } /* As tdb1_find, but if you succeed, keep the lock */ -tdb1_off_t tdb1_find_lock_hash(struct tdb1_context *tdb, TDB_DATA key, uint32_t hash, int locktype, +tdb1_off_t tdb1_find_lock_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash, int locktype, struct tdb1_record *rec) { uint32_t rec_ptr; @@ -123,13 +144,13 @@ tdb1_off_t tdb1_find_lock_hash(struct tdb1_context *tdb, TDB_DATA key, uint32_t return rec_ptr; } -static TDB_DATA _tdb1_fetch(struct tdb1_context *tdb, TDB_DATA key); +static TDB_DATA _tdb1_fetch(struct tdb_context *tdb, TDB_DATA key); /* update an entry in place - this only works if the new data size is <= the old data size and the key exists. on failure return -1. 
*/ -static int tdb1_update_hash(struct tdb1_context *tdb, TDB_DATA key, uint32_t hash, TDB_DATA dbuf) +static int tdb1_update_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash, TDB_DATA dbuf) { struct tdb1_record rec; tdb1_off_t rec_ptr; @@ -162,7 +183,7 @@ static int tdb1_update_hash(struct tdb1_context *tdb, TDB_DATA key, uint32_t has return -1; } - if (tdb->methods->tdb1_write(tdb, rec_ptr + sizeof(rec) + rec.key_len, + if (tdb->tdb1.io->tdb1_write(tdb, rec_ptr + sizeof(rec) + rec.key_len, dbuf.dptr, dbuf.dsize) == -1) return -1; @@ -181,7 +202,7 @@ static int tdb1_update_hash(struct tdb1_context *tdb, TDB_DATA key, uint32_t has * then the TDB_DATA will have zero length but * a non-zero pointer */ -static TDB_DATA _tdb1_fetch(struct tdb1_context *tdb, TDB_DATA key) +static TDB_DATA _tdb1_fetch(struct tdb_context *tdb, TDB_DATA key) { tdb1_off_t rec_ptr; struct tdb1_record rec; @@ -189,9 +210,12 @@ static TDB_DATA _tdb1_fetch(struct tdb1_context *tdb, TDB_DATA key) uint32_t hash; /* find which hash bucket it is in */ - hash = tdb->hash_fn(&key); - if (!(rec_ptr = tdb1_find_lock_hash(tdb,key,hash,F_RDLCK,&rec))) - return tdb1_null; + hash = tdb_hash(tdb, key.dptr, key.dsize); + if (!(rec_ptr = tdb1_find_lock_hash(tdb,key,hash,F_RDLCK,&rec))) { + ret.dptr = NULL; + ret.dsize = 0; + return ret; + } ret.dptr = tdb1_alloc_read(tdb, rec_ptr + sizeof(rec) + rec.key_len, rec.data_len); @@ -200,48 +224,30 @@ static TDB_DATA _tdb1_fetch(struct tdb1_context *tdb, TDB_DATA key) return ret; } -TDB_DATA tdb1_fetch(struct tdb1_context *tdb, TDB_DATA key) +enum TDB_ERROR tdb1_fetch(struct tdb_context *tdb, TDB_DATA key, TDB_DATA *data) { - TDB_DATA ret = _tdb1_fetch(tdb, key); - - return ret; + *data = _tdb1_fetch(tdb, key); + if (data->dptr == NULL) + return tdb->last_error; + return TDB_SUCCESS; } -/* - * Find an entry in the database and hand the record's data to a parsing - * function. The parsing function is executed under the chain read lock, so it - * should be fast and should not block on other syscalls. - * - * DON'T CALL OTHER TDB CALLS FROM THE PARSER, THIS MIGHT LEAD TO SEGFAULTS. - * - * For mmapped tdb's that do not have a transaction open it points the parsing - * function directly at the mmap area, it avoids the malloc/memcpy in this - * case. If a transaction is open or no mmap is available, it has to do - * malloc/read/parse/free. - * - * This is interesting for all readers of potentially large data structures in - * the tdb records, ldb indexes being one example. - * - * Return -1 if the record was not found. 
- */ - -int tdb1_parse_record(struct tdb1_context *tdb, TDB_DATA key, - int (*parser)(TDB_DATA key, TDB_DATA data, - void *private_data), - void *private_data) +enum TDB_ERROR tdb1_parse_record(struct tdb_context *tdb, TDB_DATA key, + enum TDB_ERROR (*parser)(TDB_DATA key, + TDB_DATA data, + void *private_data), + void *private_data) { tdb1_off_t rec_ptr; struct tdb1_record rec; - int ret; + enum TDB_ERROR ret; uint32_t hash; /* find which hash bucket it is in */ - hash = tdb->hash_fn(&key); + hash = tdb_hash(tdb, key.dptr, key.dsize); if (!(rec_ptr = tdb1_find_lock_hash(tdb,key,hash,F_RDLCK,&rec))) { - /* record not found */ - tdb->last_error = TDB_ERR_NOEXIST; - return -1; + return tdb->last_error; } ret = tdb1_parse_data(tdb, key, rec_ptr + sizeof(rec) + rec.key_len, @@ -258,7 +264,7 @@ int tdb1_parse_record(struct tdb1_context *tdb, TDB_DATA key, this doesn't match the conventions in the rest of this module, but is compatible with gdbm */ -static int tdb1_exists_hash(struct tdb1_context *tdb, TDB_DATA key, uint32_t hash) +static int tdb1_exists_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash) { struct tdb1_record rec; @@ -268,24 +274,25 @@ static int tdb1_exists_hash(struct tdb1_context *tdb, TDB_DATA key, uint32_t has return 1; } -int tdb1_exists(struct tdb1_context *tdb, TDB_DATA key) +int tdb1_exists(struct tdb_context *tdb, TDB_DATA key) { - uint32_t hash = tdb->hash_fn(&key); + uint32_t hash = tdb_hash(tdb, key.dptr, key.dsize); int ret; + assert(tdb->flags & TDB_VERSION1); ret = tdb1_exists_hash(tdb, key, hash); return ret; } /* actually delete an entry in the database given the offset */ -int tdb1_do_delete(struct tdb1_context *tdb, tdb1_off_t rec_ptr, struct tdb1_record *rec) +int tdb1_do_delete(struct tdb_context *tdb, tdb1_off_t rec_ptr, struct tdb1_record *rec) { tdb1_off_t last_ptr, i; struct tdb1_record lastrec; - if (tdb->read_only || tdb->traverse_read) return -1; + if ((tdb->flags & TDB_RDONLY) || tdb->tdb1.traverse_read) return -1; - if (((tdb->traverse_write != 0) && (!TDB1_DEAD(rec))) || + if (((tdb->tdb1.traverse_write != 0) && (!TDB1_DEAD(rec))) || tdb1_write_lock_record(tdb, rec_ptr) == -1) { /* Someone traversing here: mark it as dead */ rec->magic = TDB1_DEAD_MAGIC; @@ -313,7 +320,7 @@ int tdb1_do_delete(struct tdb1_context *tdb, tdb1_off_t rec_ptr, struct tdb1_rec return 0; } -static int tdb1_count_dead(struct tdb1_context *tdb, uint32_t hash) +static int tdb1_count_dead(struct tdb_context *tdb, uint32_t hash) { int res = 0; tdb1_off_t rec_ptr; @@ -338,7 +345,7 @@ static int tdb1_count_dead(struct tdb1_context *tdb, uint32_t hash) /* * Purge all DEAD records from a hash chain */ -static int tdb1_purge_dead(struct tdb1_context *tdb, uint32_t hash) +static int tdb1_purge_dead(struct tdb_context *tdb, uint32_t hash) { int res = -1; struct tdb1_record rec; @@ -374,13 +381,13 @@ static int tdb1_purge_dead(struct tdb1_context *tdb, uint32_t hash) } /* delete an entry in the database given a key */ -static int tdb1_delete_hash(struct tdb1_context *tdb, TDB_DATA key, uint32_t hash) +static int tdb1_delete_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash) { tdb1_off_t rec_ptr; struct tdb1_record rec; int ret; - if (tdb->max_dead_records != 0) { + if (tdb->tdb1.max_dead_records != 0) { /* * Allow for some dead records per hash chain, mainly for @@ -390,7 +397,7 @@ static int tdb1_delete_hash(struct tdb1_context *tdb, TDB_DATA key, uint32_t has if (tdb1_lock(tdb, TDB1_BUCKET(hash), F_WRLCK) == -1) return -1; - if (tdb1_count_dead(tdb, hash) >= 
tdb->max_dead_records) { + if (tdb1_count_dead(tdb, hash) >= tdb->tdb1.max_dead_records) { /* * Don't let the per-chain freelist grow too large, * delete all existing dead records @@ -427,11 +434,12 @@ static int tdb1_delete_hash(struct tdb1_context *tdb, TDB_DATA key, uint32_t has return ret; } -int tdb1_delete(struct tdb1_context *tdb, TDB_DATA key) +int tdb1_delete(struct tdb_context *tdb, TDB_DATA key) { - uint32_t hash = tdb->hash_fn(&key); + uint32_t hash = tdb_hash(tdb, key.dptr, key.dsize); int ret; + assert(tdb->flags & TDB_VERSION1); ret = tdb1_delete_hash(tdb, key, hash); return ret; } @@ -439,7 +447,7 @@ int tdb1_delete(struct tdb1_context *tdb, TDB_DATA key) /* * See if we have a dead record around with enough space */ -static tdb1_off_t tdb1_find_dead(struct tdb1_context *tdb, uint32_t hash, +static tdb1_off_t tdb1_find_dead(struct tdb_context *tdb, uint32_t hash, struct tdb1_record *r, tdb1_len_t length) { tdb1_off_t rec_ptr; @@ -465,7 +473,7 @@ static tdb1_off_t tdb1_find_dead(struct tdb1_context *tdb, uint32_t hash, return 0; } -static int _tdb1_store(struct tdb1_context *tdb, TDB_DATA key, +static int _tdb1_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag, uint32_t hash) { struct tdb1_record rec; @@ -479,16 +487,23 @@ static int _tdb1_store(struct tdb1_context *tdb, TDB_DATA key, tdb->last_error = TDB_ERR_EXISTS; goto fail; } + if (tdb->last_error != TDB_ERR_NOEXIST) { + goto fail; + } } else { /* first try in-place update, on modify or replace. */ if (tdb1_update_hash(tdb, key, hash, dbuf) == 0) { goto done; } - if (tdb->last_error == TDB_ERR_NOEXIST && - flag == TDB_MODIFY) { - /* if the record doesn't exist and we are in TDB1_MODIFY mode then - we should fail the store */ - goto fail; + if (tdb->last_error != TDB_SUCCESS) { + if (tdb->last_error != TDB_ERR_NOEXIST) { + goto fail; + } + if (flag == TDB_MODIFY) { + /* if the record doesn't exist and we are in TDB1_MODIFY mode then + we should fail the store */ + goto fail; + } } } /* reset the error code potentially set by the tdb1_update() */ @@ -504,7 +519,9 @@ static int _tdb1_store(struct tdb1_context *tdb, TDB_DATA key, fails and we are left with a dead spot in the tdb. */ if (!(p = (char *)malloc(key.dsize + dbuf.dsize))) { - tdb->last_error = TDB_ERR_OOM; + tdb->last_error = tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR, + "tdb1_store: out of memory" + " allocating copy"); goto fail; } @@ -512,7 +529,7 @@ static int _tdb1_store(struct tdb1_context *tdb, TDB_DATA key, if (dbuf.dsize) memcpy(p+key.dsize, dbuf.dptr, dbuf.dsize); - if (tdb->max_dead_records != 0) { + if (tdb->tdb1.max_dead_records != 0) { /* * Allow for some dead records per hash chain, look if we can * find one that can hold the new record. 
We need enough space @@ -529,7 +546,7 @@ static int _tdb1_store(struct tdb1_context *tdb, TDB_DATA key, rec.full_hash = hash; rec.magic = TDB1_MAGIC; if (tdb1_rec_write(tdb, rec_ptr, &rec) == -1 - || tdb->methods->tdb1_write( + || tdb->tdb1.io->tdb1_write( tdb, rec_ptr + sizeof(rec), p, key.dsize + dbuf.dsize) == -1) { goto fail; @@ -548,7 +565,7 @@ static int _tdb1_store(struct tdb1_context *tdb, TDB_DATA key, goto fail; } - if ((tdb->max_dead_records != 0) + if ((tdb->tdb1.max_dead_records != 0) && (tdb1_purge_dead(tdb, hash) == -1)) { tdb1_unlock(tdb, -1, F_WRLCK); goto fail; @@ -574,7 +591,7 @@ static int _tdb1_store(struct tdb1_context *tdb, TDB_DATA key, /* write out and point the top of the hash chain at it */ if (tdb1_rec_write(tdb, rec_ptr, &rec) == -1 - || tdb->methods->tdb1_write(tdb, rec_ptr+sizeof(rec), p, key.dsize+dbuf.dsize)==-1 + || tdb->tdb1.io->tdb1_write(tdb, rec_ptr+sizeof(rec), p, key.dsize+dbuf.dsize)==-1 || tdb1_ofs_write(tdb, TDB1_HASH_TOP(hash), &rec_ptr) == -1) { /* Need to tdb1_unallocate() here */ goto fail; @@ -596,18 +613,22 @@ static int _tdb1_store(struct tdb1_context *tdb, TDB_DATA key, return 0 on success, -1 on failure */ -int tdb1_store(struct tdb1_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag) +int tdb1_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag) { uint32_t hash; int ret; - if (tdb->read_only || tdb->traverse_read) { - tdb->last_error = TDB_ERR_RDONLY; + assert(tdb->flags & TDB_VERSION1); + + if ((tdb->flags & TDB_RDONLY) || tdb->tdb1.traverse_read) { + tdb->last_error = tdb_logerr(tdb, TDB_ERR_RDONLY, + TDB_LOG_USE_ERROR, + "tdb_store: read-only tdb"); return -1; } /* find which hash bucket it is in */ - hash = tdb->hash_fn(&key); + hash = tdb_hash(tdb, key.dptr, key.dsize); if (tdb1_lock(tdb, TDB1_BUCKET(hash), F_WRLCK) == -1) return -1; @@ -617,14 +638,16 @@ int tdb1_store(struct tdb1_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag) } /* Append to an entry. Create if not exist. */ -int tdb1_append(struct tdb1_context *tdb, TDB_DATA key, TDB_DATA new_dbuf) +int tdb1_append(struct tdb_context *tdb, TDB_DATA key, TDB_DATA new_dbuf) { uint32_t hash; TDB_DATA dbuf; int ret = -1; + assert(tdb->flags & TDB_VERSION1); + /* find which hash bucket it is in */ - hash = tdb->hash_fn(&key); + hash = tdb_hash(tdb, key.dptr, key.dsize); if (tdb1_lock(tdb, TDB1_BUCKET(hash), F_WRLCK) == -1) return -1; @@ -673,7 +696,7 @@ failed: The aim of this sequence number is to allow for a very lightweight test of a possible tdb change. */ -int tdb1_get_seqnum(struct tdb1_context *tdb) +int tdb1_get_seqnum(struct tdb_context *tdb) { tdb1_off_t seqnum=0; @@ -681,17 +704,12 @@ int tdb1_get_seqnum(struct tdb1_context *tdb) return seqnum; } -int tdb1_hash_size(struct tdb1_context *tdb) -{ - return tdb->header.hash_size; -} - /* add a region of the file to the freelist. 
Length is the size of the region in bytes, which includes the free list header that needs to be added
 */
-static int tdb1_free_region(struct tdb1_context *tdb, tdb1_off_t offset, ssize_t length)
+static int tdb1_free_region(struct tdb_context *tdb, tdb1_off_t offset, ssize_t length)
 {
 	struct tdb1_record rec;
 	if (length <= sizeof(rec)) {
@@ -721,7 +739,7 @@ static int tdb1_free_region(struct tdb1_context *tdb, tdb1_off_t offset, ssize_t
 
   This code carefully steps around the recovery area, leaving it alone
  */
-int tdb1_wipe_all(struct tdb1_context *tdb)
+int tdb1_wipe_all(struct tdb_context *tdb)
 {
 	int i;
 	tdb1_off_t offset = 0;
@@ -729,7 +747,7 @@ int tdb1_wipe_all(struct tdb1_context *tdb)
 	tdb1_off_t recovery_head;
 	tdb1_len_t recovery_size = 0;
 
-	if (tdb1_lockall(tdb) != 0) {
+	if (tdb_lockall(tdb) != TDB_SUCCESS) {
 		return -1;
 	}
 
@@ -746,7 +764,7 @@ int tdb1_wipe_all(struct tdb1_context *tdb)
 	if (recovery_head != 0) {
 		struct tdb1_record rec;
 
-		if (tdb->methods->tdb1_read(tdb, recovery_head, &rec, sizeof(rec), TDB1_DOCONV()) == -1) {
+		if (tdb->tdb1.io->tdb1_read(tdb, recovery_head, &rec, sizeof(rec), TDB1_DOCONV()) == -1) {
 			tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR,
 				   "tdb1_wipe_all: failed to read recovery record");
 			return -1;
@@ -755,7 +773,7 @@ int tdb1_wipe_all(struct tdb1_context *tdb)
 	}
 
 	/* wipe the hashes */
-	for (i=0;i<tdb->header.hash_size;i++) {
+	for (i=0;i<tdb->tdb1.header.hash_size;i++) {
 		if (tdb1_ofs_write(tdb, TDB1_HASH_TOP(i), &offset) == -1) {
 			tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR,
 				   "tdb1_wipe_all: failed to write hash %d", i);
@@ -774,8 +792,8 @@ int tdb1_wipe_all(struct tdb1_context *tdb)
 	   for the recovery area */
 	if (recovery_size == 0) {
 		/* the simple case - the whole file can be used as a freelist */
-		data_len = (tdb->file->map_size - TDB1_DATA_START(tdb->header.hash_size));
-		if (tdb1_free_region(tdb, TDB1_DATA_START(tdb->header.hash_size), data_len) != 0) {
+		data_len = (tdb->file->map_size - TDB1_DATA_START(tdb->tdb1.header.hash_size));
+		if (tdb1_free_region(tdb, TDB1_DATA_START(tdb->tdb1.header.hash_size), data_len) != 0) {
 			goto failed;
 		}
 	} else {
@@ -787,8 +805,8 @@ int tdb1_wipe_all(struct tdb1_context *tdb)
 		   move the recovery area or we risk subtle data
 		   corruption
 		*/
-		data_len = (recovery_head - TDB1_DATA_START(tdb->header.hash_size));
-		if (tdb1_free_region(tdb, TDB1_DATA_START(tdb->header.hash_size), data_len) != 0) {
+		data_len = (recovery_head - TDB1_DATA_START(tdb->tdb1.header.hash_size));
+		if (tdb1_free_region(tdb, TDB1_DATA_START(tdb->tdb1.header.hash_size), data_len) != 0) {
 			goto failed;
 		}
 		/* and the 2nd free list entry after the recovery area - if any */
@@ -798,116 +816,15 @@ int tdb1_wipe_all(struct tdb1_context *tdb)
 		}
 	}
 
-	if (tdb1_unlockall(tdb) != 0) {
-		tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR,
-			   "tdb1_wipe_all: failed to unlock");
-		goto failed;
-	}
-
+	tdb1_increment_seqnum_nonblock(tdb);
+	tdb_unlockall(tdb);
 	return 0;
 
 failed:
-	tdb1_unlockall(tdb);
+	tdb_unlockall(tdb);
 	return -1;
 }
 
-struct traverse_state {
-	enum TDB_ERROR error;
-	struct tdb1_context *dest_db;
-};
-
-/*
-  traverse function for repacking
- */
-static int repack_traverse(struct tdb1_context *tdb, TDB_DATA key, TDB_DATA data, void *private_data)
-{
-	struct traverse_state *state = (struct traverse_state *)private_data;
-	if (tdb1_store(state->dest_db, key, data, TDB_INSERT) != 0) {
-		state->error = state->dest_db->last_error;
-		return -1;
-	}
-	return 0;
-}
-
-/*
-  repack a tdb
- */
-int tdb1_repack(struct tdb1_context *tdb)
-{
-	struct tdb1_context *tmp_db;
-	struct traverse_state state;
-
-	if (tdb1_transaction_start(tdb) != 0) {
-		tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR,
-			   __location__ " Failed to start transaction");
-		return -1;
-	}
-
-	tmp_db = tdb1_open("tmpdb", tdb1_hash_size(tdb), TDB_INTERNAL, O_RDWR|O_CREAT, 0);
-	if (tmp_db == NULL) {
-		tdb->last_error = tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
-					     __location__ " Failed to create tmp_db");
-		tdb1_transaction_cancel(tdb);
-		return -1;
-	}
-
-	state.error = TDB_SUCCESS;
-	state.dest_db = tmp_db;
-
-	if (tdb1_traverse_read(tdb, repack_traverse, &state) == -1) {
-		tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR,
-			   __location__ " Failed to traverse copying out");
-		tdb1_transaction_cancel(tdb);
-		tdb1_close(tmp_db);
-		return -1;
-	}
-
-	if (state.error != TDB_SUCCESS) {
-		tdb->last_error = tdb_logerr(tdb, state.error, TDB_LOG_ERROR,
-					     __location__ " Error during traversal");
-		tdb1_transaction_cancel(tdb);
-		tdb1_close(tmp_db);
-		return -1;
-	}
-
-	if (tdb1_wipe_all(tdb) != 0) {
-		tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR,
-			   __location__ " Failed to wipe database\n");
-		tdb1_transaction_cancel(tdb);
-		tdb1_close(tmp_db);
-		return -1;
-	}
-
-	state.error = TDB_SUCCESS;
-	state.dest_db = tdb;
-
-	if (tdb1_traverse_read(tmp_db, repack_traverse, &state) == -1) {
-		tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR,
-			   __location__ " Failed to traverse copying back");
-		tdb1_transaction_cancel(tdb);
-		tdb1_close(tmp_db);
-		return -1;
-	}
-
-	if (state.error) {
-		tdb->last_error = tdb_logerr(tdb, state.error, TDB_LOG_ERROR,
-					     __location__ " Error during second traversal");
-		tdb1_transaction_cancel(tdb);
-		tdb1_close(tmp_db);
-		return -1;
-	}
-
-	tdb1_close(tmp_db);
-
-	if (tdb1_transaction_commit(tdb) != 0) {
-		tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR,
-			   __location__ " Failed to commit");
-		return -1;
-	}
-
-	return 0;
-}
-
 /* Even on files, we can get partial writes due to signals. */
 bool tdb1_write_all(int fd, const void *buf, size_t count)
 {
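
For callers, the most visible change in this patch is that tdb1_fetch() no longer returns a TDB_DATA (with a null dptr on failure) but an enum TDB_ERROR, filling a caller-supplied TDB_DATA instead, and tdb1_parse_record() now returns an enum TDB_ERROR as well. The following caller-side sketch is illustrative only and not part of the diff; it assumes tdb1_private.h declares tdb1_fetch() for the caller, and that the returned dptr is heap-allocated and must be freed by the caller, as with the old interface.

#include <stdlib.h>
#include "tdb1_private.h"	/* assumed to declare tdb1_fetch() and friends */

/* Sketch of the new calling convention (not part of the patch). */
static int example_lookup(struct tdb_context *tdb, TDB_DATA key)
{
	TDB_DATA val;
	enum TDB_ERROR ecode = tdb1_fetch(tdb, key, &val);

	if (ecode != TDB_SUCCESS) {
		/* A missing key now shows up as TDB_ERR_NOEXIST in the
		 * return value, not only in tdb->last_error. */
		return -1;
	}

	/* ... use val.dptr / val.dsize ... */
	free(val.dptr);
	return 0;
}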