if necessary
note that "len" is the minimum length needed for the db
*/
-static int tdb1_oob(struct tdb_context *tdb, tdb1_off_t len, int probe)
+static int tdb1_oob(struct tdb_context *tdb, tdb1_off_t off, tdb1_len_t len,
+ int probe)
{
struct stat st;
- if (len <= tdb->file->map_size)
+ if (len + off < len) {
+ if (!probe) {
+ tdb->last_error = tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
+ "tdb1_oob off %d len %d wrap\n",
+ (int)off, (int)len);
+ }
+ return -1;
+ }
+
+ if (off + len <= tdb->file->map_size)
return 0;
if (tdb->flags & TDB_INTERNAL) {
if (!probe) {
tdb->last_error = tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
- "tdb1_oob len %d beyond internal malloc size %d",
- (int)len, (int)tdb->file->map_size);
+					   "tdb1_oob len %d beyond internal malloc size %u",
+					   (int)(off + len), (unsigned)tdb->file->map_size);
}
return -1;
}
return -1;
}
- if (st.st_size < (size_t)len) {
+ if (st.st_size < (size_t)off + len) {
if (!probe) {
tdb->last_error = tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
- "tdb1_oob len %d beyond eof at %d",
- (int)len, (int)st.st_size);
+				   "tdb1_oob len %u beyond eof at %u",
+				   (unsigned)(off + len), (unsigned)st.st_size);
}
return -1;
}
+	/* Beware >4G files! */
+	if ((tdb1_off_t)st.st_size != st.st_size) {
+		tdb->last_error = tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
+					     "tdb1_oob len %llu too large!\n",
+					     (unsigned long long)st.st_size);
+		return -1;
+	}
+
/* Unmap, update size, remap */
if (tdb1_munmap(tdb) == -1) {
tdb->last_error = TDB_ERR_IO;
return -1;
}
- if (tdb->tdb1.io->tdb1_oob(tdb, off + len, 0) != 0)
+ if (tdb->tdb1.io->tdb1_oob(tdb, off, len, 0) != 0)
return -1;
if (tdb->file->map_ptr) {
static int tdb1_read(struct tdb_context *tdb, tdb1_off_t off, void *buf,
tdb1_len_t len, int cv)
{
- if (tdb->tdb1.io->tdb1_oob(tdb, off + len, 0) != 0) {
+ if (tdb->tdb1.io->tdb1_oob(tdb, off, len, 0) != 0) {
return -1;
}
addition -= written;
size += written;
}
+ tdb->stats.expands++;
return 0;
}
-/* expand the database at least size bytes by expanding the underlying
- file and doing the mmap again if necessary */
-int tdb1_expand(struct tdb_context *tdb, tdb1_off_t size)
+/* You need 'size', this tells you how much you should expand by. */
+tdb1_off_t tdb1_expand_adjust(tdb1_off_t map_size, tdb1_off_t size, int page_size)
{
- struct tdb1_record rec;
- tdb1_off_t offset, new_size, top_size, map_size;
-
- if (tdb1_lock(tdb, -1, F_WRLCK) == -1) {
- tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR,
- "lock failed in tdb1_expand");
- return -1;
- }
-
- /* must know about any previous expansions by another process */
- tdb->tdb1.io->tdb1_oob(tdb, tdb->file->map_size + 1, 1);
+ tdb1_off_t new_size, top_size;
/* limit size in order to avoid using up huge amounts of memory for
* in memory tdbs if an oddball huge record creeps in */
if (size > 100 * 1024) {
- top_size = tdb->file->map_size + size * 2;
+ top_size = map_size + size * 2;
} else {
- top_size = tdb->file->map_size + size * 100;
+ top_size = map_size + size * 100;
}
/* always make room for at least top_size more records, and at
least 25% more space. if the DB is smaller than 100MiB,
otherwise grow it by 10% only. */
- if (tdb->file->map_size > 100 * 1024 * 1024) {
- map_size = tdb->file->map_size * 1.10;
+ if (map_size > 100 * 1024 * 1024) {
+ new_size = map_size * 1.10;
} else {
- map_size = tdb->file->map_size * 1.25;
+ new_size = map_size * 1.25;
}
/* Round the database up to a multiple of the page size */
- new_size = MAX(top_size, map_size);
- size = TDB1_ALIGN(new_size, tdb->tdb1.page_size) - tdb->file->map_size;
+ new_size = MAX(top_size, new_size);
+ return TDB1_ALIGN(new_size, page_size) - map_size;
+}
+
+/* expand the database at least size bytes by expanding the underlying
+ file and doing the mmap again if necessary */
+int tdb1_expand(struct tdb_context *tdb, tdb1_off_t size)
+{
+ struct tdb1_record rec;
+ tdb1_off_t offset;
+
+ if (tdb1_lock(tdb, -1, F_WRLCK) == -1) {
+ tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR,
+ "lock failed in tdb1_expand");
+ return -1;
+ }
+
+ /* must know about any previous expansions by another process */
+ tdb->tdb1.io->tdb1_oob(tdb, tdb->file->map_size, 1, 1);
+
+ size = tdb1_expand_adjust(tdb->file->map_size, size,
+ tdb->tdb1.page_size);
if (!(tdb->flags & TDB_INTERNAL))
tdb1_munmap(tdb);
char *new_map_ptr = (char *)realloc(tdb->file->map_ptr,
tdb->file->map_size);
if (!new_map_ptr) {
+ tdb->last_error = tdb_logerr(tdb, TDB_ERR_OOM,
+ TDB_LOG_ERROR,
+ "tdb1_expand: no memory");
tdb->file->map_size -= size;
goto fail;
}
}
/* Give a piece of tdb data to a parser */
-
-int tdb1_parse_data(struct tdb_context *tdb, TDB_DATA key,
- tdb1_off_t offset, tdb1_len_t len,
- int (*parser)(TDB_DATA key, TDB_DATA data,
- void *private_data),
- void *private_data)
+enum TDB_ERROR tdb1_parse_data(struct tdb_context *tdb, TDB_DATA key,
+ tdb1_off_t offset, tdb1_len_t len,
+ enum TDB_ERROR (*parser)(TDB_DATA key,
+ TDB_DATA data,
+ void *private_data),
+ void *private_data)
{
TDB_DATA data;
- int result;
+ enum TDB_ERROR result;
data.dsize = len;
* Optimize by avoiding the malloc/memcpy/free, point the
* parser directly at the mmap area.
*/
- if (tdb->tdb1.io->tdb1_oob(tdb, offset+len, 0) != 0) {
- return -1;
+ if (tdb->tdb1.io->tdb1_oob(tdb, offset, len, 0) != 0) {
+ return tdb->last_error;
}
data.dptr = offset + (unsigned char *)tdb->file->map_ptr;
return parser(key, data, private_data);
}
if (!(data.dptr = tdb1_alloc_read(tdb, offset, len))) {
- return -1;
+ return tdb->last_error;
}
result = parser(key, data, private_data);
rec->magic, offset);
return -1;
}
- return tdb->tdb1.io->tdb1_oob(tdb, rec->next+sizeof(*rec), 0);
+ return tdb->tdb1.io->tdb1_oob(tdb, rec->next, sizeof(*rec), 0);
}
int tdb1_rec_write(struct tdb_context *tdb, tdb1_off_t offset, struct tdb1_record *rec)
enum TDB_ERROR tdb1_probe_length(struct tdb_context *tdb)
{
tdb->last_error = TDB_SUCCESS;
- tdb->tdb1.io->tdb1_oob(tdb, tdb->file->map_size + 1, true);
+ tdb->tdb1.io->tdb1_oob(tdb, tdb->file->map_size, 1, true);
return tdb->last_error;
}