It makes sense to share the mmap between multiple openers.
*features = hdr.features_offered;
*recovery = hdr.recovery;
if (*recovery) {
- if (*recovery < sizeof(hdr) || *recovery > tdb->map_size) {
+ if (*recovery < sizeof(hdr)
+ || *recovery > tdb->file->map_size) {
return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_check:"
" invalid recovery offset %zu",
size_t len;
enum TDB_ERROR ecode;
- for (len = 0; off + len < tdb->map_size; len++) {
+ for (len = 0; off + len < tdb->file->map_size; len++) {
char c;
ecode = tdb->methods->tread(tdb, off, &c, 1);
if (ecode != TDB_SUCCESS) {
enum TDB_ERROR ecode;
bool found_recovery = false;
- for (off = sizeof(struct tdb_header); off < tdb->map_size; off += len) {
+ for (off = sizeof(struct tdb_header);
+ off < tdb->file->map_size;
+ off += len) {
union {
struct tdb_used_record u;
struct tdb_free_record f;
tdb_logerr(tdb, TDB_SUCCESS, TDB_LOG_WARNING,
"Dead space at %zu-%zu (of %zu)",
(size_t)off, (size_t)(off + len),
- (size_t)tdb->map_size);
+ (size_t)tdb->file->map_size);
}
} else if (rec.r.magic == TDB_RECOVERY_MAGIC) {
ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec.r));
" length %zu",
(size_t)rec.r.len);
}
- if (rec.r.eof > tdb->map_size) {
+ if (rec.r.eof > tdb->file->map_size) {
return tdb_logerr(tdb, TDB_ERR_CORRUPT,
TDB_LOG_ERROR,
"tdb_check: invalid old EOF"
len = sizeof(rec.r) + rec.r.max_len;
} else if (frec_magic(&rec.f) == TDB_FREE_MAGIC) {
len = sizeof(rec.u) + frec_len(&rec.f);
- if (off + len > tdb->map_size) {
+ if (off + len > tdb->file->map_size) {
return tdb_logerr(tdb, TDB_ERR_CORRUPT,
TDB_LOG_ERROR,
"tdb_check: free overlength"
extra = rec_extra_padding(&rec.u);
len = sizeof(rec.u) + klen + dlen + extra;
- if (off + len > tdb->map_size) {
+ if (off + len > tdb->file->map_size) {
return tdb_logerr(tdb, TDB_ERR_CORRUPT,
TDB_LOG_ERROR,
"tdb_check: used overlength"
add_stat(tdb, alloc_coalesce_tried, 1);
end = off + sizeof(struct tdb_used_record) + data_len;
- while (end < tdb->map_size) {
+ while (end < tdb->file->map_size) {
const struct tdb_free_record *r;
tdb_off_t nb_off;
unsigned ftable, bucket;
/* always make room for at least 100 more records, and at
least 25% more space. */
- if (size * TDB_EXTENSION_FACTOR > tdb->map_size / 4)
+ if (size * TDB_EXTENSION_FACTOR > tdb->file->map_size / 4)
wanted = size * TDB_EXTENSION_FACTOR;
else
- wanted = tdb->map_size / 4;
+ wanted = tdb->file->map_size / 4;
wanted = adjust_size(0, wanted);
/* Only one person can expand file at a time. */
}
/* Someone else may have expanded the file, so retry. */
- old_size = tdb->map_size;
- tdb->methods->oob(tdb, tdb->map_size + 1, true);
- if (tdb->map_size != old_size) {
+ old_size = tdb->file->map_size;
+ tdb->methods->oob(tdb, tdb->file->map_size + 1, true);
+ if (tdb->file->map_size != old_size) {
tdb_unlock_expand(tdb, F_WRLCK);
return TDB_SUCCESS;
}
#include <assert.h>
#include <ccan/likely/likely.h>
-void tdb_munmap(struct tdb_context *tdb)
+/* Unmap the database file, if mapped.  Takes the shared tdb_file so
+ * multiple openers of the same file share one mapping; fd == -1 marks
+ * a TDB_INTERNAL (malloc-backed) database with nothing to unmap. */
+void tdb_munmap(struct tdb_file *file)
{
-	if (tdb->flags & TDB_INTERNAL)
+	/* TDB_INTERNAL: memory is malloc'ed and freed by its owner, not here. */
+	if (file->fd == -1)
		return;
-	if (tdb->map_ptr) {
-		munmap(tdb->map_ptr, tdb->map_size);
-		tdb->map_ptr = NULL;
+	if (file->map_ptr) {
+		munmap(file->map_ptr, file->map_size);
+		file->map_ptr = NULL;
	}
}
if (tdb->flags & TDB_NOMMAP)
return;
- tdb->map_ptr = mmap(NULL, tdb->map_size, tdb->mmap_flags,
- MAP_SHARED, tdb->file->fd, 0);
+ tdb->file->map_ptr = mmap(NULL, tdb->file->map_size, tdb->mmap_flags,
+ MAP_SHARED, tdb->file->fd, 0);
/*
* NB. When mmap fails it returns MAP_FAILED *NOT* NULL !!!!
*/
- if (tdb->map_ptr == MAP_FAILED) {
- tdb->map_ptr = NULL;
+ if (tdb->file->map_ptr == MAP_FAILED) {
+ tdb->file->map_ptr = NULL;
tdb_logerr(tdb, TDB_SUCCESS, TDB_LOG_WARNING,
"tdb_mmap failed for size %lld (%s)",
- (long long)tdb->map_size, strerror(errno));
+ (long long)tdb->file->map_size, strerror(errno));
}
}
|| (tdb->flags & TDB_NOLOCK)
|| tdb_has_expansion_lock(tdb));
- if (len <= tdb->map_size)
+ if (len <= tdb->file->map_size)
return 0;
if (tdb->flags & TDB_INTERNAL) {
if (!probe) {
"tdb_oob len %lld beyond internal"
" malloc size %lld",
(long long)len,
- (long long)tdb->map_size);
+ (long long)tdb->file->map_size);
}
return TDB_ERR_IO;
}
}
/* Unmap, update size, remap */
- tdb_munmap(tdb);
+ tdb_munmap(tdb->file);
- tdb->map_size = st.st_size;
+ tdb->file->map_size = st.st_size;
tdb_mmap(tdb);
return TDB_SUCCESS;
}
return ecode;
}
- if (tdb->map_ptr) {
- memcpy(off + (char *)tdb->map_ptr, buf, len);
+ if (tdb->file->map_ptr) {
+ memcpy(off + (char *)tdb->file->map_ptr, buf, len);
} else {
ssize_t ret;
ret = pwrite(tdb->file->fd, buf, len, off);
return ecode;
}
- if (tdb->map_ptr) {
- memcpy(buf, off + (char *)tdb->map_ptr, len);
+ if (tdb->file->map_ptr) {
+ memcpy(buf, off + (char *)tdb->file->map_ptr, len);
} else {
ssize_t r = pread(tdb->file->fd, buf, len, off);
if (r != len) {
"len=%zu (%s) map_size=%zu",
r, (size_t)off, (size_t)len,
strerror(errno),
- (size_t)tdb->map_size);
+ (size_t)tdb->file->map_size);
}
}
return TDB_SUCCESS;
}
if (tdb->flags & TDB_INTERNAL) {
- char *new = realloc(tdb->map_ptr, tdb->map_size + addition);
+ char *new = realloc(tdb->file->map_ptr,
+ tdb->file->map_size + addition);
if (!new) {
return tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"No memory to expand database");
}
- tdb->map_ptr = new;
- tdb->map_size += addition;
+ tdb->file->map_ptr = new;
+ tdb->file->map_size += addition;
} else {
/* Unmap before trying to write; old TDB claimed OpenBSD had
* problem with this otherwise. */
- tdb_munmap(tdb);
+ tdb_munmap(tdb->file);
/* If this fails, we try to fill anyway. */
- if (ftruncate(tdb->file->fd, tdb->map_size + addition))
+ if (ftruncate(tdb->file->fd, tdb->file->map_size + addition))
;
/* now fill the file with something. This ensures that the
file isn't sparse, which would be very bad if we ran out of
disk. This must be done with write, not via mmap */
memset(buf, 0x43, sizeof(buf));
- ecode = fill(tdb, buf, sizeof(buf), tdb->map_size, addition);
+ ecode = fill(tdb, buf, sizeof(buf), tdb->file->map_size,
+ addition);
if (ecode != TDB_SUCCESS)
return ecode;
- tdb->map_size += addition;
+ tdb->file->map_size += addition;
tdb_mmap(tdb);
}
return TDB_SUCCESS;
{
enum TDB_ERROR ecode;
- if (unlikely(!tdb->map_ptr))
+ if (unlikely(!tdb->file->map_ptr))
return NULL;
ecode = tdb_oob(tdb, off + len, true);
if (unlikely(ecode != TDB_SUCCESS))
return TDB_ERR_PTR(ecode);
- return (char *)tdb->map_ptr + off;
+ return (char *)tdb->file->map_ptr + off;
}
void add_stat_(struct tdb_context *tdb, uint64_t *s, size_t val)
struct tdb_lock_type *new_lck;
enum TDB_ERROR ecode;
- if (offset > TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE + tdb->map_size / 8) {
+ if (offset > (TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
+ + tdb->file->map_size / 8)) {
return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_nest_lock: invalid offset %zu ltype=%d",
(size_t)offset, ltype);
&& (!tdb->file->allrecord_lock.off || ltype != F_RDLCK)) {
tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
"tdb_allrecord_unlock: have %s lock",
- tdb->allrecord_lock.ltype == F_RDLCK
+ tdb->file->allrecord_lock.ltype == F_RDLCK
? "read" : "write");
return;
}
*hdr = newdb.hdr;
if (tdb->flags & TDB_INTERNAL) {
- tdb->map_size = sizeof(newdb);
- tdb->map_ptr = malloc(tdb->map_size);
- if (!tdb->map_ptr) {
+ tdb->file->map_size = sizeof(newdb);
+ tdb->file->map_ptr = malloc(tdb->file->map_size);
+ if (!tdb->file->map_ptr) {
return tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"tdb_new_database:"
" failed to allocate");
}
- memcpy(tdb->map_ptr, &newdb, tdb->map_size);
+ memcpy(tdb->file->map_ptr, &newdb, tdb->file->map_size);
return TDB_SUCCESS;
}
if (lseek(tdb->file->fd, 0, SEEK_SET) == -1) {
return TDB_SUCCESS;
}
+/* Allocate and minimally initialize the shared tdb_file structure.
+ * Only the lock bookkeeping is reset here; the caller is responsible
+ * for filling in fd, device/inode and the mmap fields. */
+static enum TDB_ERROR tdb_new_file(struct tdb_context *tdb)
+{
+	tdb->file = malloc(sizeof(*tdb->file));
+	if (!tdb->file)
+		return tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
+				  "tdb_open: could not alloc tdb_file structure");
+	tdb->file->num_lockrecs = 0;
+	tdb->file->lockrecs = NULL;
+	tdb->file->allrecord_lock.count = 0;
+	return TDB_SUCCESS;
+}
+
struct tdb_context *tdb_open(const char *name, int tdb_flags,
int open_flags, mode_t mode,
union tdb_attribute *attr)
return NULL;
}
tdb->name = NULL;
- tdb->map_ptr = NULL;
tdb->direct_access = 0;
- tdb->map_size = sizeof(struct tdb_header);
tdb->flags = tdb_flags;
tdb->logfn = NULL;
tdb->transaction = NULL;
/* internal databases don't need any of the rest. */
if (tdb->flags & TDB_INTERNAL) {
tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
+ ecode = tdb_new_file(tdb);
+ if (ecode != TDB_SUCCESS) {
+ goto fail;
+ }
+ tdb->file->fd = -1;
ecode = tdb_new_database(tdb, seed, &hdr);
if (ecode != TDB_SUCCESS) {
goto fail;
goto fail;
}
- tdb->file = malloc(sizeof(*tdb->file));
- if (!tdb->file) {
- saved_errno = ENOMEM;
- tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
- "tdb_open: could alloc file");
+ ecode = tdb_new_file(tdb);
+ if (ecode != TDB_SUCCESS)
goto fail;
- }
tdb->file->next = files;
- tdb->file->num_lockrecs = 0;
- tdb->file->lockrecs = NULL;
- tdb->file->allrecord_lock.count = 0;
tdb->file->fd = fd;
tdb->file->device = st.st_dev;
tdb->file->inode = st.st_ino;
+ tdb->file->map_ptr = NULL;
+ tdb->file->map_size = sizeof(struct tdb_header);
} else {
/* FIXME */
ecode = tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_LOG_USE_ERROR,
tdb_unlock_open(tdb);
/* This make sure we have current map_size and mmap. */
- tdb->methods->oob(tdb, tdb->map_size + 1, true);
+ tdb->methods->oob(tdb, tdb->file->map_size + 1, true);
/* Now it's fully formed, recover if necessary. */
berr = tdb_needs_recovery(tdb);
#ifdef TDB_TRACE
close(tdb->tracefd);
#endif
- if (tdb->map_ptr) {
- if (tdb->flags & TDB_INTERNAL) {
- free(tdb->map_ptr);
- } else
- tdb_munmap(tdb);
- }
free((char *)tdb->name);
if (tdb->file) {
+ if (tdb->file->map_ptr) {
+ if (tdb->flags & TDB_INTERNAL) {
+ free(tdb->file->map_ptr);
+ } else
+ tdb_munmap(tdb->file);
+ }
if (close(tdb->file->fd) != 0)
tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_open: failed to close tdb fd"
tdb_transaction_cancel(tdb);
}
- if (tdb->map_ptr) {
+ if (tdb->file->map_ptr) {
if (tdb->flags & TDB_INTERNAL)
- free(tdb->map_ptr);
+ free(tdb->file->map_ptr);
else
- tdb_munmap(tdb);
+ tdb_munmap(tdb->file);
}
free((char *)tdb->name);
if (tdb->file) {
/* Single list of all TDBs, to detect multiple opens. */
struct tdb_file *next;
- /* The file descriptor. */
+ /* Mmap (if any), or malloc (for TDB_INTERNAL). */
+ void *map_ptr;
+
+ /* How much space has been mapped (<= current file size) */
+ tdb_len_t map_size;
+
+ /* The file descriptor (-1 for TDB_INTERNAL). */
int fd;
/* Lock information */
/* Filename of the database. */
const char *name;
- /* Mmap (if any), or malloc (for TDB_INTERNAL). */
- void *map_ptr;
-
/* Are we accessing directly? (debugging check). */
int direct_access;
- /* How much space has been mapped (<= current file size) */
- tdb_len_t map_size;
-
/* Operating read-only? (Opened O_RDONLY, or in traverse_read) */
bool read_only;
void *tdb_convert(const struct tdb_context *tdb, void *buf, tdb_len_t size);
/* Unmap and try to map the tdb. */
-void tdb_munmap(struct tdb_context *tdb);
+void tdb_munmap(struct tdb_file *file);
void tdb_mmap(struct tdb_context *tdb);
/* Either alloc a copy, or give direct access. Release frees or noop. */
tdb_len_t len;
tdb_len_t unc = 0;
- for (off = sizeof(struct tdb_header); off < tdb->map_size; off += len) {
+ for (off = sizeof(struct tdb_header);
+ off < tdb->file->map_size;
+ off += len) {
const union {
struct tdb_used_record u;
struct tdb_free_record f;
}
sprintf(*summary, SUMMARY_FORMAT,
- (size_t)tdb->map_size,
+ (size_t)tdb->file->map_size,
tally_num(keys) + tally_num(data),
tally_num(keys),
tally_min(keys), tally_mean(keys), tally_max(keys),
tally_num(hashes),
tally_min(hashes), tally_mean(hashes), tally_max(hashes),
hashesg ? hashesg : "",
- tally_total(keys, NULL) * 100.0 / tdb->map_size,
- tally_total(data, NULL) * 100.0 / tdb->map_size,
- tally_total(extra, NULL) * 100.0 / tdb->map_size,
- tally_total(freet, NULL) * 100.0 / tdb->map_size,
+ tally_total(keys, NULL) * 100.0 / tdb->file->map_size,
+ tally_total(data, NULL) * 100.0 / tdb->file->map_size,
+ tally_total(extra, NULL) * 100.0 / tdb->file->map_size,
+ tally_total(freet, NULL) * 100.0 / tdb->file->map_size,
(tally_num(keys) + tally_num(freet) + tally_num(hashes))
- * sizeof(struct tdb_used_record) * 100.0 / tdb->map_size,
+ * sizeof(struct tdb_used_record) * 100.0 / tdb->file->map_size,
tally_num(ftables) * sizeof(struct tdb_freetable)
- * 100.0 / tdb->map_size,
+ * 100.0 / tdb->file->map_size,
(tally_num(hashes)
* (sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS)
+ (sizeof(tdb_off_t) << TDB_TOPLEVEL_HASH_BITS)
+ sizeof(struct tdb_chain) * tally_num(chains))
- * 100.0 / tdb->map_size);
+ * 100.0 / tdb->file->map_size);
unlock:
free(hashesg);
break;
case TDB_NOMMAP:
tdb->flags |= TDB_NOMMAP;
- tdb_munmap(tdb);
+ tdb_munmap(tdb->file);
break;
case TDB_NOSYNC:
tdb->flags |= TDB_NOSYNC;
#include <stdbool.h>
/* FIXME: Check these! */
-#define INITIAL_TDB_MALLOC "open.c", 183, FAILTEST_MALLOC
-#define URANDOM_OPEN "open.c", 43, FAILTEST_OPEN
-#define URANDOM_READ "open.c", 23, FAILTEST_READ
+#define INITIAL_TDB_MALLOC "open.c", 195, FAILTEST_MALLOC
+#define URANDOM_OPEN "open.c", 42, FAILTEST_OPEN
+#define URANDOM_READ "open.c", 22, FAILTEST_READ
bool exit_check_log(struct failtest_call *history, unsigned num);
bool failmatch(const struct failtest_call *call,
memset(mem, 0x99, off);
/* Now populate our header, cribbing from a real TDB header. */
tdb = tdb_open(NULL, TDB_INTERNAL, O_RDWR, 0, &tap_log_attr);
- memcpy(mem, tdb->map_ptr, sizeof(struct tdb_header));
+ memcpy(mem, tdb->file->map_ptr, sizeof(struct tdb_header));
/* Mug the tdb we have to make it use this. */
- free(tdb->map_ptr);
- tdb->map_ptr = mem;
- tdb->map_size = off;
+ free(tdb->file->map_ptr);
+ tdb->file->map_ptr = mem;
+ tdb->file->map_size = off;
last_ftable = 0;
for (i = 0; i < layout->num_elems; i++) {
0600);
if (fd < 0)
err(1, "opening %s for writing", layout->filename);
- if (write(fd, tdb->map_ptr, tdb->map_size) != tdb->map_size)
+ if (write(fd, tdb->file->map_ptr, tdb->file->map_size)
+ != tdb->file->map_size)
err(1, "writing %s", layout->filename);
close(fd);
tdb_close(tdb);
if (!ok1(tdb))
break;
- val = tdb->map_size;
+ val = tdb->file->map_size;
/* Need some hash lock for expand. */
ok1(tdb_lock_hashes(tdb, 0, 1, F_WRLCK, TDB_LOCK_WAIT) == 0);
failtest_suppress = false;
}
failtest_suppress = true;
- ok1(tdb->map_size >= val + 1 * TDB_EXTENSION_FACTOR);
+ ok1(tdb->file->map_size >= val + 1 * TDB_EXTENSION_FACTOR);
ok1(tdb_unlock_hashes(tdb, 0, 1, F_WRLCK) == 0);
ok1(tdb_check(tdb, NULL, NULL) == 0);
- val = tdb->map_size;
+ val = tdb->file->map_size;
ok1(tdb_lock_hashes(tdb, 0, 1, F_WRLCK, TDB_LOCK_WAIT) == 0);
failtest_suppress = false;
if (!ok1(tdb_expand(tdb, 1024) == 0)) {
}
failtest_suppress = true;
ok1(tdb_unlock_hashes(tdb, 0, 1, F_WRLCK) == 0);
- ok1(tdb->map_size >= val + 1024 * TDB_EXTENSION_FACTOR);
+ ok1(tdb->file->map_size >= val + 1024 * TDB_EXTENSION_FACTOR);
ok1(tdb_check(tdb, NULL, NULL) == 0);
tdb_close(tdb);
}
ok1(tdb_check(tdb, NULL, NULL) == 0);
ok1(!empty_freetable(tdb));
- size = tdb->map_size;
+ size = tdb->file->map_size;
/* Insert minimal-length records until we expand. */
- for (j = 0; tdb->map_size == size; j++) {
+ for (j = 0; tdb->file->map_size == size; j++) {
was_empty = empty_freetable(tdb);
if (tdb_store(tdb, k, k, TDB_INSERT) != 0)
err(1, "Failed to store record %i", j);
else {
ok1(tap_log_messages == 0);
ok1(tdb_get_flags(tdb) & TDB_NOMMAP);
- ok1(tdb->map_ptr == NULL);
+ ok1(tdb->file->map_ptr == NULL);
}
tap_log_messages = 0;
else {
ok1(tap_log_messages == 0);
ok1(!(tdb_get_flags(tdb) & TDB_NOMMAP));
- ok1(tdb->map_ptr != NULL);
+ ok1(tdb->file->map_ptr != NULL);
}
tap_log_messages = 0;
O_RDWR|O_CREAT|O_TRUNC, 0600, &tap_log_attr);
ok1(external_agent_operation(agent, OPEN, filename) == SUCCESS);
- i = add_records_to_grow(agent, tdb->file->fd, tdb->map_size);
+ i = add_records_to_grow(agent, tdb->file->fd, tdb->file->map_size);
/* Do a traverse. */
ok1(tdb_traverse(tdb, NULL, NULL) == i);
static enum TDB_ERROR transaction_oob(struct tdb_context *tdb, tdb_off_t len,
bool probe)
{
- if (len <= tdb->map_size) {
+ if (len <= tdb->file->map_size) {
return TDB_SUCCESS;
}
if (!probe) {
tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_oob len %lld beyond transaction size %lld",
(long long)len,
- (long long)tdb->map_size);
+ (long long)tdb->file->map_size);
}
return TDB_ERR_IO;
}
/* add a write to the transaction elements, so subsequent
reads see the zero data */
- ecode = transaction_write(tdb, tdb->map_size, NULL, addition);
+ ecode = transaction_write(tdb, tdb->file->map_size, NULL, addition);
if (ecode == TDB_SUCCESS) {
- tdb->map_size += addition;
+ tdb->file->map_size += addition;
}
return ecode;
}
strerror(errno));
}
#ifdef MS_SYNC
- if (tdb->map_ptr) {
+ if (tdb->file->map_ptr) {
tdb_off_t moffset = offset & ~(getpagesize()-1);
- if (msync(moffset + (char *)tdb->map_ptr,
+ if (msync(moffset + (char *)tdb->file->map_ptr,
length + (offset - moffset), MS_SYNC) != 0) {
return tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
"tdb_transaction: msync failed: %s",
return;
}
- tdb->map_size = tdb->transaction->old_map_size;
+ tdb->file->map_size = tdb->transaction->old_map_size;
/* free all the transaction blocks */
for (i=0;i<tdb->transaction->num_blocks;i++) {
/* make sure we know about any file expansions already done by
anyone else */
- tdb->methods->oob(tdb, tdb->map_size + 1, true);
- tdb->transaction->old_map_size = tdb->map_size;
+ tdb->methods->oob(tdb, tdb->file->map_size + 1, true);
+ tdb->transaction->old_map_size = tdb->file->map_size;
/* finally hook the io methods, replacing them with
transaction specific methods */
= (((sizeof(rec) + *recovery_size) + getpagesize()-1)
& ~(getpagesize()-1))
- sizeof(rec);
- *recovery_offset = tdb->map_size;
+ *recovery_offset = tdb->file->map_size;
recovery_head = *recovery_offset;
/* Restore ->map_size before calling underlying expand_file.
Also so that we don't try to expand the file again in the
transaction commit, which would destroy the recovery
area */
- addition = (tdb->map_size - tdb->transaction->old_map_size) +
+ addition = (tdb->file->map_size - tdb->transaction->old_map_size) +
sizeof(rec) + *recovery_max_size;
- tdb->map_size = tdb->transaction->old_map_size;
+ tdb->file->map_size = tdb->transaction->old_map_size;
ecode = methods->expand_file(tdb, addition);
if (ecode != TDB_SUCCESS) {
return tdb_logerr(tdb, ecode, TDB_LOG_ERROR,
/* we have to reset the old map size so that we don't try to
expand the file again in the transaction commit, which
would destroy the recovery area */
- tdb->transaction->old_map_size = tdb->map_size;
+ tdb->transaction->old_map_size = tdb->file->map_size;
/* write the recovery header offset and sync - we can sync without a race here
as the magic ptr in the recovery record has not been set */
if (offset >= old_map_size) {
continue;
}
- if (offset + length > tdb->map_size) {
+ if (offset + length > tdb->file->map_size) {
free(data);
return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"tdb_transaction_setup_recovery:"
tdb->transaction->prepared = true;
/* expand the file to the new size if needed */
- if (tdb->map_size != tdb->transaction->old_map_size) {
- tdb_len_t add = tdb->map_size - tdb->transaction->old_map_size;
+ if (tdb->file->map_size != tdb->transaction->old_map_size) {
+ tdb_len_t add;
+
+ add = tdb->file->map_size - tdb->transaction->old_map_size;
/* Restore original map size for tdb_expand_file */
- tdb->map_size = tdb->transaction->old_map_size;
+ tdb->file->map_size = tdb->transaction->old_map_size;
ecode = methods->expand_file(tdb, add);
if (ecode != TDB_SUCCESS) {
tdb_logerr(tdb, ecode, TDB_LOG_ERROR,
tdb->transaction->num_blocks = 0;
/* ensure the new data is on disk */
- ecode = transaction_sync(tdb, 0, tdb->map_size);
+ ecode = transaction_sync(tdb, 0, tdb->file->map_size);
if (ecode != TDB_SUCCESS) {
return ecode;
}
free(data);
- ecode = transaction_sync(tdb, 0, tdb->map_size);
+ ecode = transaction_sync(tdb, 0, tdb->file->map_size);
if (ecode != TDB_SUCCESS) {
return tdb_logerr(tdb, ecode, TDB_LOG_ERROR,
"tdb_transaction_recover:"