return ret;
}
-static size_t adjust_size(size_t keylen, size_t datalen, bool want_extra)
+static size_t adjust_size(size_t keylen, size_t datalen)
{
size_t size = keylen + datalen;
- /* We want at least 50% growth for data. */
- if (want_extra)
- size += datalen/2;
-
if (size < TDB_MIN_DATA_LEN)
size = TDB_MIN_DATA_LEN;
{
ssize_t leftover;
- /* We might *want* extra, but not have it, so leftover is negative. */
- leftover = total_len - adjust_size(keylen, datalen, want_extra);
- if (leftover < (ssize_t)sizeof(struct tdb_free_record))
- return 0;
+ if (want_extra)
+ datalen += datalen / 2;
+ leftover = total_len - adjust_size(keylen, datalen);
- /* If we want extra anwyay, don't split unless we have 2x size. */
- if (want_extra && leftover <= datalen / 2)
+ if (leftover < (ssize_t)sizeof(struct tdb_free_record))
return 0;
return leftover;
tdb_off_t off, b_off,best_off;
struct tdb_free_record pad, best = { 0 }, *r;
double multiplier;
- size_t size = keylen + datalen;
+ size_t size = adjust_size(keylen, datalen);
again:
b_off = bucket_off(zone_off, bucket);
leftover = record_leftover(keylen, datalen, want_extra,
best.data_len);
+ assert(keylen + datalen + leftover <= best.data_len);
/* We need to mark non-free before we drop lock, otherwise
* coalesce() could try to merge it! */
if (set_header(tdb, &rec, keylen, datalen,
{
tdb_off_t start_zone = tdb->zone_off, off;
bool wrapped = false;
- size_t size = adjust_size(keylen, datalen, want_extra);
+ size_t size = adjust_size(keylen, datalen);
/* If they are growing, add 50% to get to higher bucket. */
if (want_extra)
if (likely(off != 0))
break;
- if (tdb_expand(tdb, adjust_size(keylen, datalen, growing)))
+ if (tdb_expand(tdb, adjust_size(keylen, datalen)))
return TDB_OFF_ERR;
}
#include <ccan/tdb2/hash.c>
#include <ccan/tdb2/check.c>
#include <ccan/tap/tap.h>
+#include <ccan/ilog/ilog.h>
#include "logging.h"
#define MAX_SIZE 13100
#define SIZE_STEP 131
+/* Test helper: return the file offset of the record stored under @key,
+ * or 0 if the lookup fails (TDB_OFF_ERR) — the test only compares offsets
+ * for equality, so conflating "error" with offset 0 is acceptable here.
+ * Takes and releases the hash read-lock around the lookup; the offset may
+ * be stale by the time it is used, which is fine for this single-threaded
+ * test (it only detects whether a record moved between stores). */
+static tdb_off_t tdb_offset(struct tdb_context *tdb, struct tdb_data key)
+{
+	tdb_off_t off;
+	struct tdb_used_record rec;
+	struct hash_info h;
+
+	/* F_RDLCK: read-only lookup; rec/NULL args are unused by this caller. */
+	off = find_and_lock(tdb, key, F_RDLCK, &h, &rec, NULL);
+	if (unlikely(off == TDB_OFF_ERR))
+		return 0;
+	/* Drop the hash-chain locks taken by find_and_lock before returning. */
+	tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_RDLCK);
+	return off;
+}
+
int main(int argc, char *argv[])
{
- unsigned int i, j;
+ unsigned int i, j, moves;
struct tdb_context *tdb;
unsigned char *buffer;
+ tdb_off_t oldoff = 0, newoff;
int flags[] = { TDB_INTERNAL, TDB_DEFAULT, TDB_NOMMAP,
TDB_INTERNAL|TDB_CONVERT, TDB_CONVERT,
TDB_NOMMAP|TDB_CONVERT };
buffer[i] = i;
plan_tests(sizeof(flags) / sizeof(flags[0])
- * ((2 + MAX_SIZE/SIZE_STEP * 4) * 2 + 6)
+ * ((3 + MAX_SIZE/SIZE_STEP * 4) * 2 + 6)
+ 1);
/* Using tdb_store. */
if (!tdb)
continue;
+ moves = 0;
for (j = 0; j < MAX_SIZE; j += SIZE_STEP) {
data.dptr = buffer;
data.dsize = j;
ok1(data.dsize == j);
ok1(memcmp(data.dptr, buffer, data.dsize) == 0);
free(data.dptr);
+ newoff = tdb_offset(tdb, key);
+ if (newoff != oldoff)
+ moves++;
+ oldoff = newoff;
}
ok1(!tdb_has_locks(tdb));
+ /* We should increase by 50% each time... */
+ ok(moves <= ilog64(j / SIZE_STEP)*2, "Moved %u times", moves);
tdb_close(tdb);
}
if (!tdb)
continue;
+ moves = 0;
for (j = 0; j < MAX_SIZE; j += SIZE_STEP) {
data.dptr = buffer + prev_len;
data.dsize = j - prev_len;
ok1(memcmp(data.dptr, buffer, data.dsize) == 0);
free(data.dptr);
prev_len = data.dsize;
+ newoff = tdb_offset(tdb, key);
+ if (newoff != oldoff)
+ moves++;
+ oldoff = newoff;
}
ok1(!tdb_has_locks(tdb));
+ /* We should increase by 50% each time... */
+ ok(moves <= ilog64(j / SIZE_STEP)*2, "Moved %u times", moves);
tdb_close(tdb);
}