-static tdb_len_t adjust_size(size_t keylen, size_t datalen, bool growing)
-{
- tdb_len_t size = keylen + datalen;
-
- if (size < MIN_DATA_LEN)
- size = MIN_DATA_LEN;
-
- /* Overallocate if this is coming from an enlarging store. */
- if (growing)
- size += datalen / 2;
-
- /* Round to next uint64_t boundary. */
- return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
-}
-
-/* If this fails, try tdb_expand. */
-tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
- uint64_t hash, bool growing)
-{
- tdb_off_t off;
- tdb_len_t size, actual;
- struct tdb_used_record rec;
-
- /* We don't want header to change during this! */
- assert(tdb->header_uptodate);
-
- size = adjust_size(keylen, datalen, growing);
-
- off = get_free(tdb, size, &actual);
- if (unlikely(off == TDB_OFF_ERR || off == 0))
- return off;
-
- /* Some supergiant values can't be encoded. */
- if (set_header(tdb, &rec, keylen, datalen, actual, hash) != 0) {
- add_free_record(tdb, off, sizeof(rec) + actual);
- return TDB_OFF_ERR;
- }
-
- if (tdb_write_convert(tdb, off, &rec, sizeof(rec)) != 0)
- return TDB_OFF_ERR;
-
- return off;
-}
-
-static bool larger_buckets_might_help(struct tdb_context *tdb)
-{
- /* If our buckets are already covering 1/8 of a zone, don't
- * bother (note: might become an 1/16 of a zone if we double
- * zone size). */
- tdb_len_t size = (1ULL << tdb->header.v.zone_bits) / 8;
-
- if (size >= MIN_DATA_LEN
- && size_to_bucket(tdb, size) < tdb->header.v.free_buckets) {
- return false;
- }
-
- /* FIXME: Put stats in tdb_context or examine db itself! */
- /* It's fairly cheap to do as we expand database. */
- return true;
-}
-
/* Is the current zone layout acceptable as-is? */
static bool zones_happy(struct tdb_context *tdb)
{
	/* FIXME: look at distribution of zones. */
	(void)tdb;
	return true;
}
-
-/* Returns how much extra room we get, or TDB_OFF_ERR. */
-static tdb_len_t expand_to_fill_zones(struct tdb_context *tdb)