return 0;
}
+/* Given that at least 'size' more bytes are needed, return how much the
+ * database should grow by: enough to cover 'size' with headroom, and
+ * such that the new total (map_size + result) is rounded up to a
+ * multiple of page_size. */
+tdb_off_t tdb_expand_adjust(tdb_off_t map_size, tdb_off_t size, int page_size)
+{
+ tdb_off_t new_size, top_size;
+
+ /* limit size in order to avoid using up huge amounts of memory for
+ * in memory tdbs if an oddball huge record creeps in */
+ if (size > 100 * 1024) {
+ top_size = map_size + size * 2;
+ } else {
+ top_size = map_size + size * 100;
+ }
+
+ /* Grow by at least 25% while the DB is smaller than 100MiB, but by
+ only 10% once it is larger, so big databases do not balloon on
+ every expansion.  top_size above still guarantees room for the
+ requested 'size'. */
+ if (map_size > 100 * 1024 * 1024) {
+ new_size = map_size * 1.10;
+ } else {
+ new_size = map_size * 1.25;
+ }
+
+ /* Take the larger of the two targets, round the database up to a
+ multiple of the page size, and return the delta to grow by. */
+ new_size = MAX(top_size, new_size);
+ return TDB_ALIGN(new_size, page_size) - map_size;
+}
/* expand the database at least size bytes by expanding the underlying
file and doing the mmap again if necessary */
int tdb_expand(struct tdb_context *tdb, tdb_off_t size)
{
struct tdb_record rec;
- tdb_off_t offset, new_size;
+ tdb_off_t offset;
if (tdb_lock(tdb, -1, F_WRLCK) == -1) {
TDB_LOG((tdb, TDB_DEBUG_ERROR, "lock failed in tdb_expand\n"));
/* must know about any previous expansions by another process */
tdb->methods->tdb_oob(tdb, tdb->map_size + 1, 1);
- /* always make room for at least 100 more records, and at
- least 25% more space. Round the database up to a multiple
- of the page size */
- new_size = MAX(tdb->map_size + size*100, tdb->map_size * 1.25);
- size = TDB_ALIGN(new_size, tdb->page_size) - tdb->map_size;
+ size = tdb_expand_adjust(tdb->map_size, size, tdb->page_size);
if (!(tdb->flags & TDB_INTERNAL))
tdb_munmap(tdb);
struct tdb_record *rec);
void tdb_io_init(struct tdb_context *tdb);
int tdb_expand(struct tdb_context *tdb, tdb_off_t size);
+tdb_off_t tdb_expand_adjust(tdb_off_t map_size, tdb_off_t size, int page_size);
int tdb_rec_free_read(struct tdb_context *tdb, tdb_off_t off,
struct tdb_record *rec);
void tdb_header_hash(struct tdb_context *tdb,
key.dsize = strlen("hi");
key.dptr = (void *)"hi";
- data->dptr = realloc(data->dptr, data->dsize + extra_len);
- memset(data->dptr + data->dsize, 'x', extra_len);
data->dsize += extra_len;
tdb_transaction_start(tdb);
tdb_store(tdb, key, *data, TDB_REPLACE);
tdb_transaction_commit(tdb);
- diag("TDB size = %zu", (size_t)tdb->map_size);
}
int main(int argc, char *argv[])
struct tdb_context *tdb;
size_t i;
TDB_DATA data;
+ struct tdb_record rec;
+ tdb_off_t off;
plan_tests(2);
tdb = tdb_open_ex("run-transaction-expand.tdb",
ok1(tdb);
data.dsize = 0;
- data.dptr = NULL;
+ data.dptr = calloc(1000, getpagesize());
/* Simulate a slowly growing record. */
for (i = 0; i < 1000; i++)
write_record(tdb, getpagesize(), &data);
- /* We should only be about 3 times larger than largest record. */
- ok1(tdb->map_size < 3 * i * getpagesize());
+ tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &off);
+ tdb_read(tdb, off, &rec, sizeof(rec), DOCONV());
+ diag("TDB size = %zu, recovery = %u-%u",
+ (size_t)tdb->map_size, off, off + sizeof(rec) + rec.rec_len);
+
+ /* We should only be about 5 times larger than the largest record
+ (assert strictly under 6x to allow some slack). */
+ ok1(tdb->map_size < 6 * i * getpagesize());
tdb_close(tdb);
free(data.dptr);
*recovery_size = tdb_recovery_size(tdb);
/* round up to a multiple of page size */
- *recovery_max_size = TDB_ALIGN(sizeof(rec) + *recovery_size, tdb->page_size) - sizeof(rec);
+ *recovery_max_size = tdb_expand_adjust(tdb->map_size,
+ *recovery_size,
+ tdb->page_size)
+ - sizeof(rec);
+
*recovery_offset = tdb->map_size;
recovery_head = *recovery_offset;