/*
   Trivial Database 2: free list/block handling
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "private.h"
#include <ccan/likely/likely.h>
#include <time.h>
#include <assert.h>
#include <limits.h>

/* We have to be able to fit a free record here. */
#define MIN_DATA_LEN	\
	(sizeof(struct tdb_free_record) - sizeof(struct tdb_used_record))

/* We have a series of free lists, each one covering a "zone" of the file.
 *
 * For each zone we have a series of per-size buckets, and a final bucket
 * for "too big" records.
 *
 * It's possible to move the free_list_head, but *only* under the allrecord
 * lock. */
static tdb_off_t free_list_off(struct tdb_context *tdb, unsigned int list)
{
	return tdb->header.v.free_off + list * sizeof(tdb_off_t);
}

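/* Illustrative sketch (not part of tdb): the free-list heads are a flat
 * array of tdb_off_t at free_off, one run of (free_buckets + 1) heads per
 * zone, the last head in each run being the "too big" bucket.  A
 * hypothetical helper showing how a (zone, bucket) pair maps to a flat
 * list number: */
static inline unsigned int example_list_number(unsigned int zone,
					       unsigned int free_buckets,
					       unsigned int bucket)
{
	/* E.g. zone 3, bucket 4, free_buckets 10 => list 3*11 + 4 = 37. */
	return zone * (free_buckets + 1) + bucket;
}
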
/* We're a library: playing with srandom() is unfriendly.  srandom_r
 * probably lacks portability.  We don't need very random here. */
static unsigned int quick_random(struct tdb_context *tdb)
{
	return getpid() + time(NULL) + (unsigned long)tdb;
}

/* Start by using a random zone to spread the load. */
void tdb_zone_init(struct tdb_context *tdb)
{
	/*
	 * We read num_zones without a proper lock, so we could have
	 * gotten a partial read.  Since zone_bits is 1 byte long, we
	 * can trust that; even if it's increased, the number of zones
	 * cannot have decreased.  And using the map size means we
	 * will not start with a zone which hasn't been filled yet.
	 */
	tdb->last_zone = quick_random(tdb)
		% ((tdb->map_size >> tdb->header.v.zone_bits) + 1);
}

static unsigned fls64(uint64_t val)
{
	if (val <= ULONG_MAX) {
		/* This is significantly faster! */
		return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
	}

	/* Portable fallback: binary search for the top set bit. */
	unsigned r = 64;
	if (!(val & 0xffffffff00000000ull)) { val <<= 32; r -= 32; }
	if (!(val & 0xffff000000000000ull)) { val <<= 16; r -= 16; }
	if (!(val & 0xff00000000000000ull)) { val <<= 8;  r -= 8; }
	if (!(val & 0xf000000000000000ull)) { val <<= 4;  r -= 4; }
	if (!(val & 0xc000000000000000ull)) { val <<= 2;  r -= 2; }
	if (!(val & 0x8000000000000000ull)) { val <<= 1;  r -= 1; }
	return r;
}

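/* Illustrative check of the fls64() semantics assumed below: it returns
 * the 1-based position of the highest set bit (so fls64(1) == 1,
 * fls64(8) == 4) and 0 for 0, i.e. floor(log2(val)) + 1 for non-zero val: */
static inline void example_fls64_invariant(uint64_t val)
{
	if (val)
		assert((val >> (fls64(val) - 1)) == 1);
	else
		assert(fls64(val) == 0);
}
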
/* In which bucket would we find a particular record size? (ignoring header) */
unsigned int size_to_bucket(struct tdb_context *tdb, tdb_len_t data_len)
{
	unsigned int bucket;

	/* We can't have records smaller than this. */
	assert(data_len >= MIN_DATA_LEN);

	/* Ignoring the header... */
	if (data_len - MIN_DATA_LEN <= 64) {
		/* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
		bucket = (data_len - MIN_DATA_LEN) / 8;
	} else {
		/* After that we go power of 2. */
		bucket = fls64(data_len - MIN_DATA_LEN) + 2;
	}

	if (unlikely(bucket > tdb->header.v.free_buckets))
		bucket = tdb->header.v.free_buckets;
	return bucket;
}

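/* Worked example (illustrative): sizes are bucketed by the amount over
 * MIN_DATA_LEN: 0..64 extra bytes map linearly to buckets 0..8, then the
 * spacing goes power-of-2 (65..127 => fls64()+2 == 9, 128..255 => 10, and
 * so on), finally clamping to the oversize bucket.  Assuming free_buckets
 * is at least 10: */
static inline void example_bucket_map(struct tdb_context *tdb)
{
	assert(size_to_bucket(tdb, MIN_DATA_LEN) == 0);
	assert(size_to_bucket(tdb, MIN_DATA_LEN + 64) == 8);
	assert(size_to_bucket(tdb, MIN_DATA_LEN + 65) == 9);
	assert(size_to_bucket(tdb, MIN_DATA_LEN + 128) == 10);
}
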
/* What zone does a block belong in? */
tdb_off_t zone_of(struct tdb_context *tdb, tdb_off_t off)
{
	assert(tdb->header_uptodate);

	return off >> tdb->header.v.zone_bits;
}

/* Returns free_buckets + 1 (nothing found), or the bucket to search. */
static tdb_off_t find_free_head(struct tdb_context *tdb, tdb_off_t bucket)
{
	tdb_off_t first, off;

	/* Speculatively search for a non-zero bucket. */
	first = tdb->last_zone * (tdb->header.v.free_buckets+1) + bucket;
	off = tdb_find_nonzero_off(tdb, free_list_off(tdb, first),
				   tdb->header.v.free_buckets + 1 - bucket);
	return bucket + off;
}

static int remove_from_list(struct tdb_context *tdb,
			    tdb_off_t list, struct tdb_free_record *r)
{
	tdb_off_t off;

	/* Front of list? */
	if (r->prev == 0)
		off = free_list_off(tdb, list);
	else
		off = r->prev + offsetof(struct tdb_free_record, next);

	/* r->prev->next = r->next */
	if (tdb_write_off(tdb, off, r->next))
		return -1;

	if (r->next != 0) {
		off = r->next + offsetof(struct tdb_free_record, prev);
		/* r->next->prev = r->prev */
		if (tdb_write_off(tdb, off, r->prev))
			return -1;
	}
	return 0;
}

/* Enqueue in this free list. */
static int enqueue_in_free(struct tdb_context *tdb,
			   tdb_off_t list, tdb_off_t off,
			   struct tdb_free_record *new)
{
	new->prev = 0;
	/* new->next = head. */
	new->next = tdb_read_off(tdb, free_list_off(tdb, list));
	if (new->next == TDB_OFF_ERR)
		return -1;

	if (new->next) {
		/* next->prev = new. */
		if (tdb_write_off(tdb, new->next
				  + offsetof(struct tdb_free_record, prev),
				  off) != 0)
			return -1;
	}
	/* head = new. */
	if (tdb_write_off(tdb, free_list_off(tdb, list), off) != 0)
		return -1;

	return tdb_write_convert(tdb, off, new, sizeof(*new));
}

/* List isn't locked. */
int add_free_record(struct tdb_context *tdb,
		    tdb_off_t off, tdb_len_t len_with_header)
{
	struct tdb_free_record new;
	tdb_off_t list;
	int ret;

	assert(len_with_header >= sizeof(new));

	new.magic = TDB_FREE_MAGIC;
	new.data_len = len_with_header - sizeof(struct tdb_used_record);

	tdb->last_zone = zone_of(tdb, off);
	list = tdb->last_zone * (tdb->header.v.free_buckets+1)
		+ size_to_bucket(tdb, new.data_len);

	if (tdb_lock_free_list(tdb, list, TDB_LOCK_WAIT) != 0)
		return -1;

	ret = enqueue_in_free(tdb, list, off, &new);
	tdb_unlock_free_list(tdb, list);
	return ret;
}

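/* Usage sketch (illustrative): freeing a used record hands back the whole
 * block, header included, using the record-length helpers used elsewhere
 * in this file: */
static inline int example_free_used(struct tdb_context *tdb, tdb_off_t off,
				    const struct tdb_used_record *rec)
{
	return add_free_record(tdb, off, sizeof(*rec)
			       + rec_key_length(rec)
			       + rec_data_length(rec)
			       + rec_extra_padding(rec));
}
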
/* If we have enough left over to be useful, split that off. */
static int to_used_record(struct tdb_context *tdb,
			  tdb_off_t off, tdb_len_t needed,
			  tdb_len_t total_len, tdb_len_t *actual)
{
	struct tdb_used_record used;
	tdb_len_t leftover;

	leftover = total_len - needed;
	if (leftover < sizeof(struct tdb_free_record))
		leftover = 0;

	*actual = total_len - leftover;

	if (leftover) {
		if (add_free_record(tdb, off + sizeof(used) + *actual,
				    leftover))
			return -1;
	}
	return 0;
}

/* Note: we unlock the current list if we coalesce or fail. */
static int coalesce(struct tdb_context *tdb, tdb_off_t off,
		    tdb_off_t list, tdb_len_t data_len)
{
	struct tdb_free_record pad, *r;
	tdb_off_t end = off + sizeof(struct tdb_used_record) + data_len;

	while (!tdb->methods->oob(tdb, end + sizeof(*r), 1)) {
		tdb_off_t nlist;

		r = tdb_get(tdb, end, &pad, sizeof(pad));
		if (!r)
			goto err;

		if (r->magic != TDB_FREE_MAGIC)
			break;

		nlist = zone_of(tdb, end) * (tdb->header.v.free_buckets+1)
			+ size_to_bucket(tdb, r->data_len);

		/* We may be violating lock order here, so best effort. */
		if (tdb_lock_free_list(tdb, nlist, TDB_LOCK_NOWAIT) == -1)
			break;

		/* Now we have lock, re-check. */
		r = tdb_get(tdb, end, &pad, sizeof(pad));
		if (!r) {
			tdb_unlock_free_list(tdb, nlist);
			goto err;
		}
		if (unlikely(r->magic != TDB_FREE_MAGIC)) {
			tdb_unlock_free_list(tdb, nlist);
			break;
		}

		/* The neighbour lives on its own list, nlist. */
		if (remove_from_list(tdb, nlist, r) == -1) {
			tdb_unlock_free_list(tdb, nlist);
			goto err;
		}

		end += sizeof(struct tdb_used_record) + r->data_len;
		tdb_unlock_free_list(tdb, nlist);
	}

	/* Didn't find any adjacent free? */
	if (end == off + sizeof(struct tdb_used_record) + data_len)
		return 0;

	/* OK, expand record. */
	r = tdb_get(tdb, off, &pad, sizeof(pad));
	if (!r)
		goto err;
	if (remove_from_list(tdb, list, r) == -1)
		goto err;

	/* We have to drop this to avoid deadlocks. */
	tdb_unlock_free_list(tdb, list);

	if (add_free_record(tdb, off, end - off) == -1)
		return -1;
	return 1;

err:
	/* To unify error paths, we *always* unlock list. */
	tdb_unlock_free_list(tdb, list);
	return -1;
}

/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
				tdb_off_t bucket, size_t size,
				tdb_len_t *actual)
{
	tdb_off_t list, off, prev, best_off;
	struct tdb_free_record pad, best = { 0 }, *r;
	double multiplier;

again:
	list = tdb->last_zone * (tdb->header.v.free_buckets+1) + bucket;

	/* Lock this list. */
	if (tdb_lock_free_list(tdb, list, TDB_LOCK_WAIT) == -1)
		return TDB_OFF_ERR;

	best.data_len = -1ULL;
	best_off = 0;
	multiplier = 1.0;

	/* Walk the list to see if any are large enough, getting less fussy
	 * as we go. */
	prev = free_list_off(tdb, list);
	while ((off = tdb_read_off(tdb, prev)) != 0) {
		if (unlikely(off == TDB_OFF_ERR))
			goto unlock_err;

		r = tdb_get(tdb, off, &pad, sizeof(*r));
		if (!r)
			goto unlock_err;
		if (r->magic != TDB_FREE_MAGIC) {
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "lock_and_alloc: %llu non-free 0x%llx\n",
				 (long long)off, (long long)r->magic);
			goto unlock_err;
		}

		if (r->data_len >= size && r->data_len < best.data_len) {
			best_off = off;
			best = *r;
		}

		if (best.data_len < size * multiplier && best_off) {
			/* We're happy with this size: take it. */
			if (remove_from_list(tdb, list, &best) != 0)
				goto unlock_err;
			tdb_unlock_free_list(tdb, list);

			if (to_used_record(tdb, best_off, size, best.data_len,
					   actual) != 0)
				return TDB_OFF_ERR;
			return best_off;
		}
		multiplier *= 1.01;

		/* Since we're going slow anyway, try coalescing here. */
		switch (coalesce(tdb, off, list, r->data_len)) {
		case -1:
			/* This has already unlocked on error. */
			return TDB_OFF_ERR;
		case 1:
			/* This has unlocked list, restart. */
			goto again;
		}

		/* Re-read next pointer: coalescing may have changed it. */
		prev = off + offsetof(struct tdb_free_record, next);
	}

	/* Nothing in this list fits. */
	tdb_unlock_free_list(tdb, list);
	return 0;

unlock_err:
	tdb_unlock_free_list(tdb, list);
	return TDB_OFF_ERR;
}

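/* Illustrative: the "less fussy" walk above accepts the best fit seen so
 * far once it is within size * multiplier, with the multiplier growing as
 * records are examined (1% per record is assumed here, matching the
 * reconstruction above).  A sketch of that acceptance schedule: */
static inline double example_acceptance_bound(size_t size, unsigned int n)
{
	double bound = size;

	while (n--)
		bound *= 1.01;
	return bound;	/* e.g. ~1.105 * size after 10 records. */
}
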
/* We want a really big chunk.  Look through every zone's oversize bucket. */
static tdb_off_t huge_alloc(struct tdb_context *tdb, size_t size,
			    tdb_len_t *actual)
{
	tdb_off_t i, off;

	do {
		for (i = 0; i < tdb->header.v.num_zones; i++) {
			/* Try getting one from list.  lock_and_alloc()
			 * looks in tdb->last_zone. */
			tdb->last_zone = i;
			off = lock_and_alloc(tdb, tdb->header.v.free_buckets,
					     size, actual);
			if (off == TDB_OFF_ERR)
				return TDB_OFF_ERR;
			if (off != 0)
				return off;
			/* FIXME: Coalesce! */
		}
	} while (tdb_expand(tdb, 0, size, false) == 0);

	return TDB_OFF_ERR;
}

static tdb_off_t get_free(struct tdb_context *tdb, size_t size,
			  tdb_len_t *actual)
{
	tdb_off_t off, bucket;
	unsigned int num_empty, step = 0;

	bucket = size_to_bucket(tdb, size);

	/* If we're after something bigger than a single zone, handle
	 * specially. */
	if (unlikely(sizeof(struct tdb_used_record) + size
		     >= (1ULL << tdb->header.v.zone_bits))) {
		return huge_alloc(tdb, size, actual);
	}

	/* Number of zones we search is proportional to the log of them. */
	for (num_empty = 0; num_empty < fls64(tdb->header.v.num_zones);
	     num_empty++) {
		tdb_off_t b;

		/* Start at exact size bucket, and search up... */
		for (b = bucket; b <= tdb->header.v.free_buckets; b++) {
			b = find_free_head(tdb, b);

			/* Non-empty list?  Try getting block. */
			if (b <= tdb->header.v.free_buckets) {
				/* Try getting one from list. */
				off = lock_and_alloc(tdb, b, size, actual);
				if (off == TDB_OFF_ERR)
					return TDB_OFF_ERR;
				if (off != 0)
					return off;
				/* Didn't work.  Try next bucket. */
			}
		}

		/* Try another zone, at pseudo random.  Avoid duplicates by
		   using an odd step. */
		if (step == 0)
			step = ((quick_random(tdb)) % 65536) * 2 + 1;
		tdb->last_zone = (tdb->last_zone + step)
			% tdb->header.v.num_zones;
	}
	return 0;
}

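/* Illustrative: the odd step keeps the zone walk from revisiting zones too
 * soon.  An odd number is coprime to any power-of-two zone count, so for
 * such counts the walk cycles through every zone before repeating: */
static inline unsigned int example_next_zone(unsigned int zone,
					     unsigned int odd_step,
					     unsigned int num_zones)
{
	return (zone + odd_step) % num_zones;
}
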
int set_header(struct tdb_context *tdb,
	       struct tdb_used_record *rec,
	       uint64_t keylen, uint64_t datalen,
	       uint64_t actuallen, uint64_t hash)
{
	uint64_t keybits = (fls64(keylen) + 1) / 2;

	/* Use top bits of hash, so it's independent of hash table size. */
	rec->magic_and_meta
		= (actuallen - (keylen + datalen))
		| ((hash >> 53) << 32)
		| (keybits << 43)
		| ((uint64_t)TDB_MAGIC << 48);
	rec->key_and_data_len = (keylen | (datalen << (keybits*2)));

	/* Encoding can fail on big values. */
	if (rec_key_length(rec) != keylen
	    || rec_data_length(rec) != datalen
	    || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
		tdb->ecode = TDB_ERR_IO;
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "Could not encode k=%llu,d=%llu,a=%llu\n",
			 (long long)keylen, (long long)datalen,
			 (long long)actuallen);
		return -1;
	}
	return 0;
}

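/* Illustrative decode of the magic_and_meta layout written above.  The
 * padding-in-the-low-32-bits and hash-at-bit-32 placement comes straight
 * from set_header(); the key-bits and magic positions (43 and 48) are
 * assumptions mirroring what rec_key_length()/rec_extra_padding() must
 * undo: */
static inline uint64_t example_rec_top_hash(const struct tdb_used_record *rec)
{
	/* The top 11 bits of the hash, stored at bit 32. */
	return (rec->magic_and_meta >> 32) & ((1ULL << 11) - 1);
}
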
static tdb_len_t adjust_size(size_t keylen, size_t datalen, bool growing)
{
	tdb_len_t size = keylen + datalen;

	if (size < MIN_DATA_LEN)
		size = MIN_DATA_LEN;

	/* Overallocate if this is coming from an enlarging store. */
	if (growing)
		size += datalen / 2;

	/* Round to next uint64_t boundary. */
	return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}

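/* Worked example (illustrative): adjust_size() rounds up to the next
 * uint64_t boundary, so a 20-byte key plus 13 bytes of data (33 bytes)
 * reserves 40; a growing store first pads by half the data length again: */
static inline void example_adjust_size(void)
{
	assert(adjust_size(20, 13, false) == 40);
}
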
/* If this fails, try tdb_expand. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
		uint64_t hash, bool growing)
{
	tdb_off_t off;
	tdb_len_t size, actual;
	struct tdb_used_record rec;

	/* We don't want header to change during this! */
	assert(tdb->header_uptodate);

	size = adjust_size(keylen, datalen, growing);

	off = get_free(tdb, size, &actual);
	if (unlikely(off == TDB_OFF_ERR || off == 0))
		return off;

	/* Some supergiant values can't be encoded. */
	if (set_header(tdb, &rec, keylen, datalen, actual, hash) != 0) {
		add_free_record(tdb, off, sizeof(rec) + actual);
		return TDB_OFF_ERR;
	}

	if (tdb_write_convert(tdb, off, &rec, sizeof(rec)) != 0)
		return TDB_OFF_ERR;

	return off;
}

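/* Usage sketch (illustrative): per the comment above alloc(), callers
 * retry through tdb_expand() when alloc() returns 0 (no space): */
static inline tdb_off_t example_alloc_retry(struct tdb_context *tdb,
					    size_t keylen, size_t datalen,
					    uint64_t hash)
{
	tdb_off_t off;

	while ((off = alloc(tdb, keylen, datalen, hash, false)) == 0) {
		if (tdb_expand(tdb, keylen, datalen, false) == -1)
			return TDB_OFF_ERR;
	}
	return off;
}
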
static bool larger_buckets_might_help(struct tdb_context *tdb)
{
	/* If our buckets are already covering 1/8 of a zone, don't
	 * bother (note: might become 1/16 of a zone if we double the
	 * zone size). */
	tdb_len_t size = (1ULL << tdb->header.v.zone_bits) / 8;

	if (size >= MIN_DATA_LEN
	    && size_to_bucket(tdb, size) < tdb->header.v.free_buckets) {
		return false;
	}

	/* FIXME: Put stats in tdb_context or examine db itself! */
	/* It's fairly cheap to do as we expand the database. */
	return true;
}

static bool zones_happy(struct tdb_context *tdb)
{
	/* FIXME: look at distribution of zones. */
	return true;
}

/* Expand the database. */
int tdb_expand(struct tdb_context *tdb, tdb_len_t klen, tdb_len_t dlen,
	       bool growing)
{
	uint64_t new_num_buckets, new_num_zones, new_zone_bits;
	uint64_t i, old_num_total, old_num_zones, old_size, old_zone_bits;
	tdb_len_t add, freebucket_size, needed;
	tdb_off_t off, old_free_off;
	const tdb_off_t *oldf;
	struct tdb_used_record fhdr;

	/* We need room for the record header too. */
	needed = sizeof(struct tdb_used_record)
		+ adjust_size(klen, dlen, growing);
	/* tdb_allrecord_lock will update header; did zones change? */
	old_zone_bits = tdb->header.v.zone_bits;
	old_num_zones = tdb->header.v.num_zones;

	/* FIXME: this is overkill.  An expand lock? */
	if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
		return -1;

	/* Someone may have expanded for us. */
	if (old_zone_bits != tdb->header.v.zone_bits
	    || old_num_zones != tdb->header.v.num_zones)
		goto success;

	/* They may have also expanded the underlying size (otherwise we'd
	 * have expanded our mmap to look at those offsets already). */
	old_size = tdb->map_size;
	tdb->methods->oob(tdb, tdb->map_size + 1, true);
	if (tdb->map_size != old_size)
		goto success;

	/* Did we enlarge zones without enlarging file? */
	if (tdb->map_size < tdb->header.v.num_zones<<tdb->header.v.zone_bits) {
		add = (tdb->header.v.num_zones<<tdb->header.v.zone_bits)
			- tdb->map_size;
		/* Updates tdb->map_size. */
		if (tdb->methods->expand_file(tdb, add) == -1)
			goto fail;
		if (add_free_record(tdb, tdb->map_size - add, add) == -1)
			goto fail;

		/* Allocate from this zone. */
		tdb->last_zone = zone_of(tdb, tdb->map_size - add);
		goto success;
	}
	/* Slow path.  Should we increase the number of buckets? */
	new_num_buckets = tdb->header.v.free_buckets;
	if (larger_buckets_might_help(tdb))
		new_num_buckets++;

	/* Now we'll need room for the new free buckets, too.  Assume
	 * worst case (zones expand). */
	needed += sizeof(fhdr)
		+ ((tdb->header.v.num_zones+1)
		   * (new_num_buckets+1) * sizeof(tdb_off_t));

	/* If we need less than one zone, and they're working well, just add
	 * another one. */
	if (needed < (1UL<<tdb->header.v.zone_bits) && zones_happy(tdb)) {
		new_num_zones = tdb->header.v.num_zones+1;
		new_zone_bits = tdb->header.v.zone_bits;
		add = 1ULL << tdb->header.v.zone_bits;
	} else {
		/* Increase the zone size. */
		new_num_zones = tdb->header.v.num_zones;
		new_zone_bits = tdb->header.v.zone_bits+1;
		while ((new_num_zones << new_zone_bits) - tdb->map_size
		       < needed) {
			new_zone_bits++;
		}

		/* We expand by enough zones to meet the need. */
		add = (needed + (1ULL << new_zone_bits)-1)
			& ~((1ULL << new_zone_bits)-1);
	}

	/* Updates tdb->map_size. */
	if (tdb->methods->expand_file(tdb, add) == -1)
		goto fail;

	/* Use first part as new free bucket array. */
	off = tdb->map_size - add;
	freebucket_size = new_num_zones
		* (new_num_buckets + 1) * sizeof(tdb_off_t);

	/* Write a used-record header so the array is a normal record. */
	if (set_header(tdb, &fhdr, 0, freebucket_size, freebucket_size, 0))
		goto fail;
	if (tdb_write_convert(tdb, off, &fhdr, sizeof(fhdr)) == -1)
		goto fail;

	/* Adjust off to point to start of buckets, add to be remainder. */
	add -= freebucket_size + sizeof(fhdr);
	off += sizeof(fhdr);
	/* Access the old zones. */
	old_num_total = tdb->header.v.num_zones*(tdb->header.v.free_buckets+1);
	old_free_off = tdb->header.v.free_off;
	oldf = tdb_access_read(tdb, old_free_off,
			       old_num_total * sizeof(tdb_off_t));
	if (!oldf)
		goto fail;

	/* Switch to using our new zone. */
	if (zero_out(tdb, off, new_num_zones * (new_num_buckets + 1)) == -1)
		goto fail_release;
	tdb->header.v.free_off = off;
	tdb->header.v.num_zones = new_num_zones;
	tdb->header.v.zone_bits = new_zone_bits;
	tdb->header.v.free_buckets = new_num_buckets;

	/* FIXME: If zone size hasn't changed, can simply copy pointers. */
	/* FIXME: Coalesce? */
	for (i = 0; i < old_num_total; i++) {
		tdb_off_t next;
		struct tdb_free_record rec;
		tdb_off_t list;

		for (off = oldf[i]; off; off = next) {
			if (tdb_read_convert(tdb, off, &rec, sizeof(rec)))
				goto fail_release;

			list = zone_of(tdb, off)
				* (tdb->header.v.free_buckets+1)
				+ size_to_bucket(tdb, rec.data_len);

			/* Save next before enqueue_in_free() clobbers it. */
			next = rec.next;

			if (enqueue_in_free(tdb, list, off, &rec) == -1)
				goto fail_release;
		}
	}

	/* Free up the old free buckets. */
	old_free_off -= sizeof(fhdr);
	if (tdb_read_convert(tdb, old_free_off, &fhdr, sizeof(fhdr)) == -1)
		goto fail_release;
	if (add_free_record(tdb, old_free_off,
			    sizeof(fhdr)
			    + rec_data_length(&fhdr)
			    + rec_extra_padding(&fhdr)))
		goto fail_release;

	/* Add the rest as a new free record. */
	if (add_free_record(tdb, tdb->map_size - add, add) == -1)
		goto fail_release;

	/* Start allocating from where the new space is. */
	tdb->last_zone = zone_of(tdb, tdb->map_size - add);
	tdb_access_release(tdb, oldf);
success:
	tdb_allrecord_unlock(tdb, F_WRLCK);
	return 0;

fail_release:
	tdb_access_release(tdb, oldf);
fail:
	tdb_allrecord_unlock(tdb, F_WRLCK);
	return -1;
}