X-Git-Url: https://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Ftdb2%2Ftdb1_io.c;h=488f3d847837e2009b4eee2968932c3dbbfaa080;hp=f3d139d0434a97fc7c9dbb6a4deea6cb6d2cd43e;hb=926996e88c32445c874ff9c4f47f159db6b45995;hpb=670ba98f74b52df541d153eeab9d3310932e75cd

diff --git a/ccan/tdb2/tdb1_io.c b/ccan/tdb2/tdb1_io.c
index f3d139d0..488f3d84 100644
--- a/ccan/tdb2/tdb1_io.c
+++ b/ccan/tdb2/tdb1_io.c
@@ -36,16 +36,26 @@
    if necessary
    note that "len" is the minimum length needed for the db
 */
-static int tdb1_oob(struct tdb_context *tdb, tdb1_off_t len, int probe)
+static int tdb1_oob(struct tdb_context *tdb, tdb1_off_t off, tdb1_len_t len,
+		    int probe)
 {
 	struct stat st;
-	if (len <= tdb->file->map_size)
+	if (len + off < len) {
+		if (!probe) {
+			tdb->last_error = tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
+					"tdb1_oob off %d len %d wrap\n",
+					(int)off, (int)len);
+		}
+		return -1;
+	}
+
+	if (off + len <= tdb->file->map_size)
 		return 0;
 	if (tdb->flags & TDB_INTERNAL) {
 		if (!probe) {
 			tdb->last_error = tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
-					"tdb1_oob len %d beyond internal malloc size %d",
-					(int)len, (int)tdb->file->map_size);
+					"tdb1_oob len %d beyond internal malloc size %u",
+					(int)(off + len), (int)tdb->file->map_size);
 		}
 		return -1;
 	}
@@ -55,15 +65,23 @@ static int tdb1_oob(struct tdb_context *tdb, tdb1_off_t len, int probe)
 		return -1;
 	}
 
-	if (st.st_size < (size_t)len) {
+	if (st.st_size < (size_t)off + len) {
 		if (!probe) {
 			tdb->last_error = tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
-					"tdb1_oob len %d beyond eof at %d",
-					(int)len, (int)st.st_size);
+					"tdb1_oob len %u beyond eof at %u",
+					(int)(off + len), (int)st.st_size);
 		}
 		return -1;
 	}
 
+	/* Beware >4G files! */
+	if ((tdb1_off_t)st.st_size != st.st_size) {
+		tdb->last_error = tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
+					"tdb1_oob len %llu too large!\n",
+					(long long)st.st_size);
+		return -1;
+	}
+
 	/* Unmap, update size, remap */
 	if (tdb1_munmap(tdb) == -1) {
 		tdb->last_error = TDB_ERR_IO;
@@ -87,7 +105,7 @@ static int tdb1_write(struct tdb_context *tdb, tdb1_off_t off,
 		return -1;
 	}
 
-	if (tdb->tdb1.io->tdb1_oob(tdb, off + len, 0) != 0)
+	if (tdb->tdb1.io->tdb1_oob(tdb, off, len, 0) != 0)
 		return -1;
 
 	if (tdb->file->map_ptr) {
@@ -136,7 +154,7 @@ void *tdb1_convert(void *buf, uint32_t size)
 static int tdb1_read(struct tdb_context *tdb, tdb1_off_t off, void *buf,
 		     tdb1_len_t len, int cv)
 {
-	if (tdb->tdb1.io->tdb1_oob(tdb, off + len, 0) != 0) {
+	if (tdb->tdb1.io->tdb1_oob(tdb, off, len, 0) != 0) {
 		return -1;
 	}
 
@@ -312,42 +330,51 @@ static int tdb1_expand_file(struct tdb_context *tdb, tdb1_off_t size, tdb1_off_t
 	return 0;
 }
 
-/* expand the database at least size bytes by expanding the underlying
-   file and doing the mmap again if necessary */
-int tdb1_expand(struct tdb_context *tdb, tdb1_off_t size)
+/* You need 'size', this tells you how much you should expand by. */
+tdb1_off_t tdb1_expand_adjust(tdb1_off_t map_size, tdb1_off_t size, int page_size)
 {
-	struct tdb1_record rec;
-	tdb1_off_t offset, new_size, top_size, map_size;
-
-	if (tdb1_lock(tdb, -1, F_WRLCK) == -1) {
-		tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR,
-			   "lock failed in tdb1_expand");
-		return -1;
-	}
-
-	/* must know about any previous expansions by another process */
-	tdb->tdb1.io->tdb1_oob(tdb, tdb->file->map_size + 1, 1);
+	tdb1_off_t new_size, top_size;
 
 	/* limit size in order to avoid using up huge amounts of memory for
 	 * in memory tdbs if an oddball huge record creeps in */
 	if (size > 100 * 1024) {
-		top_size = tdb->file->map_size + size * 2;
+		top_size = map_size + size * 2;
 	} else {
-		top_size = tdb->file->map_size + size * 100;
+		top_size = map_size + size * 100;
 	}
 
 	/* always make room for at least top_size more records, and at
 	   least 25% more space. if the DB is smaller than 100MiB,
 	   otherwise grow it by 10% only. */
-	if (tdb->file->map_size > 100 * 1024 * 1024) {
-		map_size = tdb->file->map_size * 1.10;
+	if (map_size > 100 * 1024 * 1024) {
+		new_size = map_size * 1.10;
 	} else {
-		map_size = tdb->file->map_size * 1.25;
+		new_size = map_size * 1.25;
 	}
 
 	/* Round the database up to a multiple of the page size */
-	new_size = MAX(top_size, map_size);
-	size = TDB1_ALIGN(new_size, tdb->tdb1.page_size) - tdb->file->map_size;
+	new_size = MAX(top_size, new_size);
+	return TDB1_ALIGN(new_size, page_size) - map_size;
+}
+
+/* expand the database at least size bytes by expanding the underlying
+   file and doing the mmap again if necessary */
+int tdb1_expand(struct tdb_context *tdb, tdb1_off_t size)
+{
+	struct tdb1_record rec;
+	tdb1_off_t offset;
+
+	if (tdb1_lock(tdb, -1, F_WRLCK) == -1) {
+		tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR,
+			   "lock failed in tdb1_expand");
+		return -1;
+	}
+
+	/* must know about any previous expansions by another process */
+	tdb->tdb1.io->tdb1_oob(tdb, tdb->file->map_size, 1, 1);
+
+	size = tdb1_expand_adjust(tdb->file->map_size, size,
+				  tdb->tdb1.page_size);
 
 	if (!(tdb->flags & TDB_INTERNAL))
 		tdb1_munmap(tdb);
@@ -456,7 +483,7 @@ enum TDB_ERROR tdb1_parse_data(struct tdb_context *tdb, TDB_DATA key,
 	 * Optimize by avoiding the malloc/memcpy/free, point the
 	 * parser directly at the mmap area.
 	 */
-	if (tdb->tdb1.io->tdb1_oob(tdb, offset+len, 0) != 0) {
+	if (tdb->tdb1.io->tdb1_oob(tdb, offset, len, 0) != 0) {
 		return tdb->last_error;
 	}
 	data.dptr = offset + (unsigned char *)tdb->file->map_ptr;
@@ -483,7 +510,7 @@ int tdb1_rec_read(struct tdb_context *tdb, tdb1_off_t offset, struct tdb1_record
 			   rec->magic, offset);
 		return -1;
 	}
-	return tdb->tdb1.io->tdb1_oob(tdb, rec->next+sizeof(*rec), 0);
+	return tdb->tdb1.io->tdb1_oob(tdb, rec->next, sizeof(*rec), 0);
 }
 
 int tdb1_rec_write(struct tdb_context *tdb, tdb1_off_t offset, struct tdb1_record *rec)
@@ -511,6 +538,6 @@ void tdb1_io_init(struct tdb_context *tdb)
 enum TDB_ERROR tdb1_probe_length(struct tdb_context *tdb)
 {
 	tdb->last_error = TDB_SUCCESS;
-	tdb->tdb1.io->tdb1_oob(tdb, tdb->file->map_size + 1, true);
+	tdb->tdb1.io->tdb1_oob(tdb, tdb->file->map_size, 1, true);
 	return tdb->last_error;
 }
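
The reworked tdb1_oob() above takes an (offset, length) pair rather than a single precomputed end offset, and rejects any access whose unsigned sum wraps before comparing it against the mapped size. A minimal stand-alone sketch of that wrap-around test, with a hypothetical range_in_bounds() helper standing in for the real tdb context:

#include <stdbool.h>
#include <stdint.h>

/* Illustration only: mirrors the overflow check added to tdb1_oob().
 * An access of 'len' bytes starting at 'off' is in bounds only if the
 * unsigned addition does not wrap and the end stays within 'map_size'. */
static bool range_in_bounds(uint32_t off, uint32_t len, uint32_t map_size)
{
	if (off + len < len)		/* unsigned addition wrapped around */
		return false;
	return off + len <= map_size;
}

With a 4096-byte map, range_in_bounds(0xFFFFFF00, 0x200, 4096) is rejected by the wrap test even though the wrapped sum (0x100) would pass the size comparison; the old interface, which received off + len already added by the caller, could not tell that case apart from a genuinely small offset.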
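
Splitting the sizing policy out as tdb1_expand_adjust() also makes it a pure function of the current map size, the requested growth and the page size: room is made for at least twice the requested size (one hundred times for requests of 100KiB or less), and for at least 25% of the current file (10% once it exceeds 100MiB), rounded up to a page boundary. A self-contained sketch that re-implements the same arithmetic for illustration; the local ALIGN macro stands in for TDB1_ALIGN and the printed values assume a 4096-byte page size:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for TDB1_ALIGN: round x up to a multiple of a (a power of two). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~(uint32_t)((a) - 1))

/* Re-implementation of the tdb1_expand_adjust() growth policy, for
 * illustration only. */
static uint32_t expand_adjust(uint32_t map_size, uint32_t size, int page_size)
{
	uint32_t new_size, top_size;

	if (size > 100 * 1024)
		top_size = map_size + size * 2;
	else
		top_size = map_size + size * 100;

	if (map_size > 100 * 1024 * 1024)
		new_size = map_size * 1.10;
	else
		new_size = map_size * 1.25;

	if (top_size > new_size)
		new_size = top_size;
	return ALIGN(new_size, page_size) - map_size;
}

int main(void)
{
	/* A 1MiB database asked to grow by 1KiB grows by 25% of its size... */
	printf("%u\n", expand_adjust(1 << 20, 1024, 4096));	/* 262144 */
	/* ...while a 4KiB request is dominated by the "100 * size" term. */
	printf("%u\n", expand_adjust(1 << 20, 4096, 4096));	/* 409600 */
	return 0;
}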