 /*
   Trivial Database 2: free list/block handling
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "private.h"
#include <ccan/likely/likely.h>
/* Needed by the code below: time(), getpid(), assert(), ULONG_MAX. */
#include <time.h>
#include <unistd.h>
#include <assert.h>
#include <limits.h>
24 /* We have to be able to fit a free record here. */
25 #define MIN_DATA_LEN \
26 (sizeof(struct tdb_free_record) - sizeof(struct tdb_used_record))
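/*
 * Why this bound (an illustrative note, not from the original file): a used
 * record occupies sizeof(struct tdb_used_record) + data_len bytes on disk.
 * When it is freed, that same span must be able to hold a struct
 * tdb_free_record, so data_len must be at least the difference between the
 * two header sizes.
 */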
/* We have a series of free lists, each one covering a "zone" of the file.
 *
 * For each zone we have a series of per-size buckets, and a final bucket
 * for "too big" records.
 *
 * It's possible to move the free_list_head, but *only* under the allrecord
 * lock. */
static tdb_off_t free_list_off(struct tdb_context *tdb, unsigned int list)
{
	return tdb->header.v.free_off + list * sizeof(tdb_off_t);
}
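/*
 * Layout sketch (an illustration): the bucket heads form one flat array of
 * tdb_off_t at header.v.free_off, indexed as
 *
 *	list = zone * (free_buckets + 1) + bucket;
 *
 * so zone 0's buckets come first, then zone 1's, and each zone's final
 * entry (bucket == free_buckets) is its "too big" bucket.
 */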
/* We're a library: playing with srandom() is unfriendly.  srandom_r
 * probably lacks portability.  We don't need very random here. */
static unsigned int quick_random(struct tdb_context *tdb)
{
	return getpid() + time(NULL) + (unsigned long)tdb;
}
/* Start by using a random zone to spread the load. */
void tdb_zone_init(struct tdb_context *tdb)
{
	/*
	 * We read num_zones without a proper lock, so we could have
	 * gotten a partial read.  Since zone_bits is 1 byte long, we
	 * can trust that; even if it's increased, the number of zones
	 * cannot have decreased.  And using the map size means we
	 * will not start with a zone which hasn't been filled yet.
	 */
	tdb->last_zone = quick_random(tdb)
		% ((tdb->map_size >> tdb->header.v.zone_bits) + 1);
}
static unsigned fls64(uint64_t val)
{
#if HAVE_BUILTIN_CLZL
	if (val <= ULONG_MAX) {
		/* This is significantly faster! */
		return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
	}
#endif
	/* Portable fallback: binary search for the highest set bit. */
	uint64_t r = 64;

	if (!val)
		return 0;
	if (!(val & 0xffffffff00000000ull)) { val <<= 32; r -= 32; }
	if (!(val & 0xffff000000000000ull)) { val <<= 16; r -= 16; }
	if (!(val & 0xff00000000000000ull)) { val <<= 8; r -= 8; }
	if (!(val & 0xf000000000000000ull)) { val <<= 4; r -= 4; }
	if (!(val & 0xc000000000000000ull)) { val <<= 2; r -= 2; }
	if (!(val & 0x8000000000000000ull)) { val <<= 1; r -= 1; }
	return r;
}
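/*
 * Worked examples (illustration only): fls64(0) == 0, fls64(1) == 1,
 * fls64(64) == 7 and fls64(1ULL << 63) == 64: the 1-based position of the
 * highest set bit, or 0 when no bit is set.
 */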
/* In which bucket would we find a particular record size? (ignoring header) */
unsigned int size_to_bucket(struct tdb_context *tdb, tdb_len_t data_len)
{
	unsigned int bucket;

	/* We can't have records smaller than this. */
	assert(data_len >= MIN_DATA_LEN);

	/* Ignoring the header... */
	if (data_len - MIN_DATA_LEN <= 64) {
		/* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
		bucket = (data_len - MIN_DATA_LEN) / 8;
	} else {
		/* After that we go power of 2. */
		bucket = fls64(data_len - MIN_DATA_LEN) + 2;
	}

	if (unlikely(bucket > tdb->header.v.free_buckets))
		bucket = tdb->header.v.free_buckets;
	return bucket;
}
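/*
 * Worked example (illustration, values assumed): a data_len of
 * MIN_DATA_LEN + 24 falls in the linear range, giving bucket 24/8 == 3;
 * MIN_DATA_LEN + 200 gives fls64(200) + 2 == 8 + 2 == 10, capped at
 * free_buckets if this zone has fewer buckets than that.
 */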
/* What zone does a block belong in? */
tdb_off_t zone_of(struct tdb_context *tdb, tdb_off_t off)
{
	assert(tdb->header_uptodate);

	return off >> tdb->header.v.zone_bits;
}
/* Returns free_buckets + 1, or list number to search. */
static tdb_off_t find_free_head(struct tdb_context *tdb, tdb_off_t bucket)
{
	tdb_off_t first, off;

	/* Speculatively search for a non-zero bucket. */
	first = tdb->last_zone * (tdb->header.v.free_buckets+1) + bucket;
	off = tdb_find_nonzero_off(tdb, free_list_off(tdb, first),
				   tdb->header.v.free_buckets + 1 - bucket);
	return bucket + off;
}
static int remove_from_list(struct tdb_context *tdb,
			    tdb_off_t list, struct tdb_free_record *r)
{
	tdb_off_t off;

	/* Front of list?  Then the list head itself points at us. */
	if (r->prev == 0) {
		off = free_list_off(tdb, list);
	} else {
		off = r->prev + offsetof(struct tdb_free_record, next);
	}
	/* r->prev->next = r->next */
	if (tdb_write_off(tdb, off, r->next)) {
		return -1;
	}

	if (r->next != 0) {
		off = r->next + offsetof(struct tdb_free_record, prev);
		/* r->next->prev = r->prev */
		if (tdb_write_off(tdb, off, r->prev)) {
			return -1;
		}
	}
	return 0;
}
/* Enqueue in this free list. */
static int enqueue_in_free(struct tdb_context *tdb,
			   tdb_off_t list,
			   tdb_off_t off,
			   struct tdb_free_record *new)
{
	new->prev = 0;
	/* new->next = head. */
	new->next = tdb_read_off(tdb, free_list_off(tdb, list));
	if (new->next == TDB_OFF_ERR)
		return -1;

	if (new->next) {
		/* next->prev = new. */
		if (tdb_write_off(tdb, new->next
				  + offsetof(struct tdb_free_record, prev),
				  off) != 0)
			return -1;
	}
	/* head = new. */
	if (tdb_write_off(tdb, free_list_off(tdb, list), off) != 0)
		return -1;

	return tdb_write_convert(tdb, off, new, sizeof(*new));
}
/* List isn't locked. */
int add_free_record(struct tdb_context *tdb,
		    tdb_off_t off, tdb_len_t len_with_header)
{
	struct tdb_free_record new;
	tdb_off_t list;
	int ret;

	assert(len_with_header >= sizeof(new));

	new.magic = TDB_FREE_MAGIC;
	new.data_len = len_with_header - sizeof(struct tdb_used_record);

	tdb->last_zone = zone_of(tdb, off);
	list = tdb->last_zone * (tdb->header.v.free_buckets+1)
		+ size_to_bucket(tdb, new.data_len);

	if (tdb_lock_free_list(tdb, list, TDB_LOCK_WAIT) != 0)
		return -1;

	ret = enqueue_in_free(tdb, list, off, &new);
	tdb_unlock_free_list(tdb, list);
	return ret;
}
/* If we have enough left over to be useful, split that off. */
static int to_used_record(struct tdb_context *tdb,
			  tdb_off_t off,
			  tdb_len_t needed,
			  tdb_len_t total_len,
			  tdb_len_t *actual)
{
	struct tdb_used_record used;
	tdb_len_t leftover;

	leftover = total_len - needed;
	if (leftover < sizeof(struct tdb_free_record))
		leftover = 0;

	*actual = total_len - leftover;

	if (leftover) {
		if (add_free_record(tdb, off + sizeof(used) + *actual,
				    leftover))
			return -1;
	}
	return 0;
}
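/*
 * Split sketch (illustration with assumed numbers): if we need 24 bytes
 * from a free block whose total_len is 100, leftover is 76.  When 76 >=
 * sizeof(struct tdb_free_record), *actual becomes 24 and the trailing 76
 * bytes go back on a free list; otherwise the whole block stays as padding
 * (*actual == 100) rather than creating an unusably small fragment.
 */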
/* Note: we unlock the current list if we coalesce or fail. */
static int coalesce(struct tdb_context *tdb, tdb_off_t off,
		    tdb_off_t list, tdb_len_t data_len)
{
	struct tdb_free_record pad, *r;
	tdb_off_t end = off + sizeof(struct tdb_used_record) + data_len;

	while (!tdb->methods->oob(tdb, end + sizeof(*r), 1)) {
		tdb_off_t nlist;

		r = tdb_get(tdb, end, &pad, sizeof(pad));
		if (!r)
			goto err;

		if (r->magic != TDB_FREE_MAGIC)
			break;

		nlist = zone_of(tdb, end) * (tdb->header.v.free_buckets+1)
			+ size_to_bucket(tdb, r->data_len);

		/* We may be violating lock order here, so best effort. */
		if (tdb_lock_free_list(tdb, nlist, TDB_LOCK_NOWAIT) == -1)
			break;

		/* Now we have lock, re-check. */
		r = tdb_get(tdb, end, &pad, sizeof(pad));
		if (!r) {
			tdb_unlock_free_list(tdb, nlist);
			goto err;
		}

		if (unlikely(r->magic != TDB_FREE_MAGIC)) {
			tdb_unlock_free_list(tdb, nlist);
			break;
		}

		if (remove_from_list(tdb, nlist, r) == -1) {
			tdb_unlock_free_list(tdb, nlist);
			goto err;
		}

		end += sizeof(struct tdb_used_record) + r->data_len;
		tdb_unlock_free_list(tdb, nlist);
	}

	/* Didn't find any adjacent free? */
	if (end == off + sizeof(struct tdb_used_record) + data_len)
		return 0;

	/* OK, expand record */
	r = tdb_get(tdb, off, &pad, sizeof(pad));
	if (!r)
		goto err;

	if (r->data_len != data_len) {
		tdb->ecode = TDB_ERR_CORRUPT;
		tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
			 "coalesce: expected data len %llu not %llu\n",
			 (long long)data_len, (long long)r->data_len);
		goto err;
	}

	if (remove_from_list(tdb, list, r) == -1)
		goto err;

	/* We have to drop this to avoid deadlocks. */
	tdb_unlock_free_list(tdb, list);

	if (add_free_record(tdb, off, end - off) == -1)
		return -1;
	return 1;

err:
	/* To unify error paths, we *always* unlock list. */
	tdb_unlock_free_list(tdb, list);
	return -1;
}
/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
				tdb_off_t bucket, size_t size,
				tdb_len_t *actual)
{
	tdb_off_t list, off, best_off;
	struct tdb_free_record pad, best = { 0 }, *r;
	double multiplier;

again:
	list = tdb->last_zone * (tdb->header.v.free_buckets+1) + bucket;

	/* Lock this list. */
	if (tdb_lock_free_list(tdb, list, TDB_LOCK_WAIT) == -1)
		return TDB_OFF_ERR;

	best.data_len = -1ULL;
	best_off = 0;
	multiplier = 1.0;

	/* Walk the list to see if any are large enough, getting less fussy
	 * as we go. */
	off = tdb_read_off(tdb, free_list_off(tdb, list));
	if (unlikely(off == TDB_OFF_ERR))
		goto unlock_err;

	while (off) {
		r = tdb_get(tdb, off, &pad, sizeof(*r));
		if (!r)
			goto unlock_err;

		if (r->magic != TDB_FREE_MAGIC) {
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "lock_and_alloc: %llu non-free 0x%llx\n",
				 (long long)off, (long long)r->magic);
			goto unlock_err;
		}

		/* Remember the smallest record that is still big enough. */
		if (r->data_len >= size && r->data_len < best.data_len) {
			best_off = off;
			best = *r;
		}

		if (best.data_len < size * multiplier && best_off)
			goto use_best;

		multiplier *= 1.01;

		/* Since we're going slow anyway, try coalescing here. */
		switch (coalesce(tdb, off, list, r->data_len)) {
		case -1:
			/* This has already unlocked on error. */
			return TDB_OFF_ERR;
		case 1:
			/* This has unlocked list, restart. */
			goto again;
		}
		off = r->next;
	}

	/* If we found anything at all, use it. */
	if (best_off) {
	use_best:
		/* We're happy with this size: take it. */
		if (remove_from_list(tdb, list, &best) != 0)
			goto unlock_err;
		tdb_unlock_free_list(tdb, list);

		if (to_used_record(tdb, best_off, size, best.data_len,
				   actual)) {
			return TDB_OFF_ERR;
		}
		return best_off;
	}

	tdb_unlock_free_list(tdb, list);
	return 0;

unlock_err:
	tdb_unlock_free_list(tdb, list);
	return TDB_OFF_ERR;
}
/* We want a really big chunk.  Look through every zone's oversize bucket. */
static tdb_off_t huge_alloc(struct tdb_context *tdb, size_t size,
			    tdb_len_t *actual)
{
	tdb_off_t i, off;

	for (i = 0; i < tdb->header.v.num_zones; i++) {
		/* Try getting one from list. */
		off = lock_and_alloc(tdb, tdb->header.v.free_buckets,
				     size, actual);
		if (off == TDB_OFF_ERR)
			return TDB_OFF_ERR;
		if (off != 0)
			return off;
		/* FIXME: Coalesce! */
	}
	return 0;
}
static tdb_off_t get_free(struct tdb_context *tdb, size_t size,
			  tdb_len_t *actual)
{
	tdb_off_t off, bucket;
	unsigned int num_empty, step = 0;

	bucket = size_to_bucket(tdb, size);

	/* If we're after something bigger than a single zone, handle
	 * it specially. */
	if (unlikely(sizeof(struct tdb_used_record) + size
		     >= (1ULL << tdb->header.v.zone_bits))) {
		return huge_alloc(tdb, size, actual);
	}

	/* Number of zones we search is proportional to the log of them. */
	for (num_empty = 0; num_empty < fls64(tdb->header.v.num_zones);
	     num_empty++) {
		tdb_off_t b;

		/* Start at exact size bucket, and search up... */
		for (b = bucket; b <= tdb->header.v.free_buckets; b++) {
			b = find_free_head(tdb, b);

			/* Non-empty list?  Try getting block. */
			if (b <= tdb->header.v.free_buckets) {
				/* Try getting one from list. */
				off = lock_and_alloc(tdb, b, size, actual);
				if (off == TDB_OFF_ERR)
					return TDB_OFF_ERR;
				if (off != 0)
					return off;
				/* Didn't work.  Try next bucket. */
			}
		}

		/* Try another zone, at pseudo random.  Avoid duplicates by
		   using an odd step. */
		if (step == 0)
			step = ((quick_random(tdb)) % 65536) * 2 + 1;
		tdb->last_zone = (tdb->last_zone + step)
			% tdb->header.v.num_zones;
	}
	return 0;
}
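/*
 * Why an odd step (illustration): when num_zones is a power of two, an odd
 * step is coprime to it, so repeated stepping visits every zone before any
 * repeat, e.g. step 3 over 8 zones starting at 0 gives
 * 0, 3, 6, 1, 4, 7, 2, 5.  For other zone counts it merely makes early
 * repeats unlikely.
 */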
int set_header(struct tdb_context *tdb,
	       struct tdb_used_record *rec,
	       uint64_t keylen, uint64_t datalen,
	       uint64_t actuallen, uint64_t hash)
{
	uint64_t keybits = (fls64(keylen) + 1) / 2;

	/* Use top bits of hash, so it's independent of hash table size. */
	rec->magic_and_meta
		= (actuallen - (keylen + datalen))
		| ((hash >> 53) << 32)
		| (keybits << 43)
		| ((uint64_t)TDB_MAGIC << 48);
	rec->key_and_data_len = (keylen | (datalen << (keybits*2)));

	/* Encoding can fail on big values. */
	if (rec_key_length(rec) != keylen
	    || rec_data_length(rec) != datalen
	    || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
		tdb->ecode = TDB_ERR_IO;
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "Could not encode k=%llu,d=%llu,a=%llu\n",
			 (long long)keylen, (long long)datalen,
			 (long long)actuallen);
		return -1;
	}
	return 0;
}
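/*
 * Encoding sketch (illustration; bit positions as used above): with
 * keylen == 5, fls64(5) == 3, so keybits == 2 and the key length lives in
 * the low 2*keybits == 4 bits of key_and_data_len, with datalen shifted
 * left by 4.  A sufficiently huge key/data combination can overflow this
 * variable split, which is what the re-decode check in set_header()
 * catches.
 */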
static tdb_len_t adjust_size(size_t keylen, size_t datalen, bool growing)
{
	tdb_len_t size = keylen + datalen;

	if (size < MIN_DATA_LEN)
		size = MIN_DATA_LEN;

	/* Overallocate if this is coming from an enlarging store. */
	if (growing)
		size += size / 2;

	/* Round to next uint64_t boundary. */
	return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}
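/*
 * Worked example (illustration): with 8-byte uint64_t the return line
 * computes (size + 7) & ~7, so (assuming keylen + datalen is already at
 * least MIN_DATA_LEN and growing is false) a 33-byte payload rounds up to
 * 40 bytes, while a 32-byte payload stays at 32.
 */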
/* If this fails, try tdb_expand. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
		uint64_t hash, bool growing)
{
	tdb_off_t off;
	tdb_len_t size, actual;
	struct tdb_used_record rec;

	/* We don't want header to change during this! */
	assert(tdb->header_uptodate);

	size = adjust_size(keylen, datalen, growing);

	off = get_free(tdb, size, &actual);
	if (unlikely(off == TDB_OFF_ERR || off == 0))
		return off;

	/* Some supergiant values can't be encoded. */
	if (set_header(tdb, &rec, keylen, datalen, actual, hash) != 0) {
		add_free_record(tdb, off, sizeof(rec) + actual);
		return TDB_OFF_ERR;
	}

	if (tdb_write_convert(tdb, off, &rec, sizeof(rec)) != 0)
		return TDB_OFF_ERR;

	return off;
}
static bool larger_buckets_might_help(struct tdb_context *tdb)
{
	/* If our buckets are already covering 1/8 of a zone, don't
	 * bother (note: might become a 1/16 of a zone if we double
	 * zone size). */
	tdb_len_t size = (1ULL << tdb->header.v.zone_bits) / 8;

	if (size >= MIN_DATA_LEN
	    && size_to_bucket(tdb, size) < tdb->header.v.free_buckets) {
		return false;
	}

	/* FIXME: Put stats in tdb_context or examine db itself! */
	/* It's fairly cheap to do as we expand database. */
	return true;
}
static bool zones_happy(struct tdb_context *tdb)
{
	/* FIXME: look at distribution of zones. */
	return true;
}
/* Returns how much extra room we get, or TDB_OFF_ERR. */
static tdb_len_t expand_to_fill_zones(struct tdb_context *tdb)
{
	tdb_len_t add;

	/* We can enlarge zones without enlarging file to match. */
	add = (tdb->header.v.num_zones << tdb->header.v.zone_bits)
		- tdb->map_size;
	if (add <= sizeof(struct tdb_free_record))
		return 0;

	/* Updates tdb->map_size. */
	if (tdb->methods->expand_file(tdb, add) == -1)
		return TDB_OFF_ERR;
	if (add_free_record(tdb, tdb->map_size - add, add) == -1)
		return TDB_OFF_ERR;
	return add;
}
static int update_zones(struct tdb_context *tdb,
			uint64_t new_num_zones,
			uint64_t new_zone_bits,
			uint64_t new_num_buckets,
			tdb_len_t add)
{
	tdb_len_t freebucket_size;
	const tdb_off_t *oldf;
	tdb_off_t i, off, old_num_total, old_free_off;
	struct tdb_used_record fhdr;

	/* Updates tdb->map_size. */
	if (tdb->methods->expand_file(tdb, add) == -1)
		return -1;

	/* Use first part as new free bucket array. */
	off = tdb->map_size - add;
	freebucket_size = new_num_zones
		* (new_num_buckets + 1) * sizeof(tdb_off_t);

	/* Write header for the free bucket array. */
	if (set_header(tdb, &fhdr, 0, freebucket_size, freebucket_size, 0))
		return -1;
	if (tdb_write_convert(tdb, off, &fhdr, sizeof(fhdr)) == -1)
		return -1;

	/* Adjust off to point to start of buckets, add to be remainder. */
	add -= freebucket_size + sizeof(fhdr);
	off += sizeof(fhdr);

	/* Access the old zones. */
	old_num_total = tdb->header.v.num_zones*(tdb->header.v.free_buckets+1);
	old_free_off = tdb->header.v.free_off;
	oldf = tdb_access_read(tdb, old_free_off,
			       old_num_total * sizeof(tdb_off_t), true);
	if (!oldf)
		return -1;

	/* Switch to using our new zone. */
	if (zero_out(tdb, off, freebucket_size) == -1)
		goto fail;
	tdb->header.v.free_off = off;
	tdb->header.v.num_zones = new_num_zones;
	tdb->header.v.zone_bits = new_zone_bits;
	tdb->header.v.free_buckets = new_num_buckets;

	/* FIXME: If zone size hasn't changed, can simply copy pointers. */
	/* FIXME: Coalesce? */
	for (i = 0; i < old_num_total; i++) {
		tdb_off_t next;
		struct tdb_free_record rec;
		tdb_off_t list;

		for (off = oldf[i]; off; off = next) {
			if (tdb_read_convert(tdb, off, &rec, sizeof(rec)))
				goto fail;

			list = zone_of(tdb, off)
				* (tdb->header.v.free_buckets+1)
				+ size_to_bucket(tdb, rec.data_len);
			next = rec.next;

			if (enqueue_in_free(tdb, list, off, &rec) == -1)
				goto fail;
		}
	}

	/* Free up the old free buckets. */
	old_free_off -= sizeof(fhdr);
	if (tdb_read_convert(tdb, old_free_off, &fhdr, sizeof(fhdr)) == -1)
		goto fail;
	if (add_free_record(tdb, old_free_off,
			    sizeof(fhdr)
			    + rec_data_length(&fhdr)
			    + rec_extra_padding(&fhdr)))
		goto fail;

	/* Add the rest as a new free record. */
	if (add_free_record(tdb, tdb->map_size - add, add) == -1)
		goto fail;

	/* Start allocating from where the new space is. */
	tdb->last_zone = zone_of(tdb, tdb->map_size - add);
	tdb_access_release(tdb, oldf);
	return write_header(tdb);

fail:
	tdb_access_release(tdb, oldf);
	return -1;
}
/* Expand the database. */
int tdb_expand(struct tdb_context *tdb, tdb_len_t klen, tdb_len_t dlen,
	       bool growing)
{
	uint64_t new_num_buckets, new_num_zones, new_zone_bits;
	uint64_t old_num_zones, old_size, old_zone_bits;
	tdb_len_t add, needed;

	/* We need room for the record header too. */
	needed = sizeof(struct tdb_used_record)
		+ adjust_size(klen, dlen, growing);

	/* tdb_allrecord_lock will update header; did zones change? */
	old_zone_bits = tdb->header.v.zone_bits;
	old_num_zones = tdb->header.v.num_zones;

	/* FIXME: this is overkill.  An expand lock? */
	if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
		return -1;

	/* Someone may have expanded for us. */
	if (old_zone_bits != tdb->header.v.zone_bits
	    || old_num_zones != tdb->header.v.num_zones)
		goto success;

	/* They may have also expanded the underlying size (otherwise we'd
	 * have expanded our mmap to look at those offsets already). */
	old_size = tdb->map_size;
	tdb->methods->oob(tdb, tdb->map_size + 1, true);
	if (tdb->map_size != old_size)
		goto success;

	add = expand_to_fill_zones(tdb);
	if (add == TDB_OFF_ERR)
		goto fail;

	if (add >= needed) {
		/* Allocate from this zone. */
		tdb->last_zone = zone_of(tdb, tdb->map_size - add);
		goto success;
	}

	/* Slow path.  Should we increase the number of buckets? */
	new_num_buckets = tdb->header.v.free_buckets;
	if (larger_buckets_might_help(tdb))
		new_num_buckets++;

	/* Now we'll need room for the new free buckets, too.  Assume
	 * worst case (zones expand). */
	needed += sizeof(struct tdb_used_record)
		+ ((tdb->header.v.num_zones+1)
		   * (new_num_buckets+1) * sizeof(tdb_off_t));

	/* If we need less than one zone, and they're working well, just add
	 * another one. */
	if (needed < (1UL<<tdb->header.v.zone_bits) && zones_happy(tdb)) {
		new_num_zones = tdb->header.v.num_zones+1;
		new_zone_bits = tdb->header.v.zone_bits;
		add = 1ULL << tdb->header.v.zone_bits;
	} else {
		/* Increase the zone size. */
		new_num_zones = tdb->header.v.num_zones;
		new_zone_bits = tdb->header.v.zone_bits+1;
		while ((new_num_zones << new_zone_bits)
		       < tdb->map_size + needed) {
			new_zone_bits++;
		}

		/* We expand by enough full zones to meet the need. */
		add = ((tdb->map_size + needed + (1ULL << new_zone_bits)-1)
		       & ~((1ULL << new_zone_bits)-1))
			- tdb->map_size;
	}

	if (update_zones(tdb, new_num_zones, new_zone_bits, new_num_buckets,
			 add) == -1)
		goto fail;

success:
	tdb_allrecord_unlock(tdb, F_WRLCK);
	return 0;

fail:
	tdb_allrecord_unlock(tdb, F_WRLCK);
	return -1;
}
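/*
 * Zone-growth sketch (illustration with assumed numbers): suppose
 * map_size == 5000, needed == 2000 and new_zone_bits settles at 12
 * (4096-byte zones).  Then map_size + needed == 7000 is rounded up to the
 * next zone boundary, 8192, so add == 8192 - 5000 == 3192: the file always
 * grows to a whole number of full zones.
 */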