/* We have to be able to fit a free record here: a used record that is
 * freed gets rewritten in place as a free record, whose header is
 * larger, so every allocation must reserve at least the difference. */
#define MIN_DATA_LEN \
	(sizeof(struct tdb_free_record) - sizeof(struct tdb_used_record))
-
-/* We have a series of free lists, each one covering a "zone" of the file.
- *
- * For each zone we have a series of per-size buckets, and a final bucket for
- * "too big".
- *
- * It's possible to move the free_list_head, but *only* under the allrecord
- * lock. */
-static tdb_off_t free_list_off(struct tdb_context *tdb, unsigned int list)
-{
- return tdb->header.v.free_off + list * sizeof(tdb_off_t);
-}
-
/* As a library we must not touch srandom() (unfriendly to our caller),
 * and srandom_r is probably not portable.  We only need a weakly
 * random value, so mix a few cheap per-process sources. */
static unsigned int quick_random(struct tdb_context *tdb)
{
	unsigned long seed;

	seed = (unsigned long)tdb;	/* address entropy only; never dereferenced */
	seed += getpid();
	seed += time(NULL);
	return seed;
}
-
-/* Start by using a random zone to spread the load. */
-void tdb_zone_init(struct tdb_context *tdb)
-{
- /*
- * We read num_zones without a proper lock, so we could have
- * gotten a partial read. Since zone_bits is 1 byte long, we
- * can trust that; even if it's increased, the number of zones
- * cannot have decreased. And using the map size means we
- * will not start with a zone which hasn't been filled yet.
- */
- tdb->last_zone = quick_random(tdb)
- % ((tdb->map_size >> tdb->header.v.zone_bits) + 1);
-}
-