return ret;
}
-static size_t adjust_size(size_t keylen, size_t datalen, bool want_extra)
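+/* Extra growth for callers that want it is added by the callers
+ * (record_leftover and get_free), not by adjust_size itself. */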
+static size_t adjust_size(size_t keylen, size_t datalen)
{
size_t size = keylen + datalen;
- /* We want at least 50% growth for data. */
- if (want_extra)
- size += datalen/2;
-
if (size < TDB_MIN_DATA_LEN)
size = TDB_MIN_DATA_LEN;
{
ssize_t leftover;
- /* We might *want* extra, but not have it, so leftover is negative. */
- leftover = total_len - adjust_size(keylen, datalen, want_extra);
- if (leftover < (ssize_t)sizeof(struct tdb_free_record))
- return 0;
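+ /* We want at least 50% growth for data. */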
+ if (want_extra)
+ datalen += datalen / 2;
+ leftover = total_len - adjust_size(keylen, datalen);
- /* If we want extra anwyay, don't split unless we have 2x size. */
- if (want_extra && leftover <= datalen / 2)
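+ /* leftover can be negative if we asked for extra space this record
+ * doesn't have; too small to hold a free record isn't worth splitting. */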
+ if (leftover < (ssize_t)sizeof(struct tdb_free_record))
return 0;
return leftover;
tdb_off_t off, b_off,best_off;
struct tdb_free_record pad, best = { 0 }, *r;
double multiplier;
- size_t size = keylen + datalen;
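+ /* Use the adjusted size (at least TDB_MIN_DATA_LEN) rather than the raw keylen + datalen. */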
+ size_t size = adjust_size(keylen, datalen);
again:
b_off = bucket_off(zone_off, bucket);
leftover = record_leftover(keylen, datalen, want_extra,
best.data_len);
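+ /* Key, data and any leftover we split off must all fit in the free record we found. */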
+ assert(keylen + datalen + leftover <= best.data_len);
/* We need to mark non-free before we drop lock, otherwise
* coalesce() could try to merge it! */
if (set_header(tdb, &rec, keylen, datalen,
{
tdb_off_t start_zone = tdb->zone_off, off;
bool wrapped = false;
- size_t size = adjust_size(keylen, datalen, want_extra);
+ size_t size = adjust_size(keylen, datalen);
/* If they are growing, add 50% to get to higher bucket. */
if (want_extra)
size += datalen / 2;
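+ /* Start at the bucket for this size and search up through the larger buckets. */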
b = size_to_bucket(tdb->zhdr.zone_bits, size);
for (b = find_free_head(tdb, b);
b <= BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits);
- b += find_free_head(tdb, b + 1)) {
+ b = find_free_head(tdb, b + 1)) {
/* Try getting one from list. */
off = lock_and_alloc(tdb, tdb->zone_off,
tdb->zhdr.zone_bits,
if (likely(off != 0))
break;
- if (tdb_expand(tdb, adjust_size(keylen, datalen, growing)))
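+ /* No usable free record was found: grow the file before trying again. */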
+ if (tdb_expand(tdb, adjust_size(keylen, datalen)))
return TDB_OFF_ERR;
}