/*
   Trivial Database 2: free list/block handling
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <ccan/likely/likely.h>
#include <ccan/ilog/ilog.h>
#include <assert.h>
#include <stdlib.h>
static unsigned fls64(uint64_t val)
{
        return ilog64(val);
}
static unsigned ffs64(uint64_t val)
{
#if HAVE_BUILTIN_FFSLL
        return __builtin_ffsll(val);
#else
        /* Fallback: binary search for the lowest set bit (1-based). */
        unsigned r = 1;

        if (!val)
                return 0;
        if (!(val & 0xffffffff)) {
                val >>= 32;
                r += 32;
        }
        if (!(val & 0xffff)) {
                val >>= 16;
                r += 16;
        }
        if (!(val & 0xff)) { val >>= 8; r += 8; }
        if (!(val & 0xf))  { val >>= 4; r += 4; }
        if (!(val & 0x3))  { val >>= 2; r += 2; }
        if (!(val & 0x1))  r += 1;
        return r;
#endif
}
/* In which bucket would we find a particular record size? (ignoring header) */
unsigned int size_to_bucket(unsigned int zone_bits, tdb_len_t data_len)
{
        unsigned int bucket;

        /* We can't have records smaller than this. */
        assert(data_len >= TDB_MIN_DATA_LEN);

        /* Ignoring the header... */
        if (data_len - TDB_MIN_DATA_LEN <= 64) {
                /* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
                bucket = (data_len - TDB_MIN_DATA_LEN) / 8;
        } else {
                /* After that we go power of 2. */
                bucket = fls64(data_len - TDB_MIN_DATA_LEN) + 2;
        }

        if (unlikely(bucket > BUCKETS_FOR_ZONE(zone_bits)))
                bucket = BUCKETS_FOR_ZONE(zone_bits);
        return bucket;
}
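
/* A worked example of the mapping above (bucket numbers follow directly
 * from the code; the cap depends on zone_bits):
 *
 *      size_to_bucket(zb, TDB_MIN_DATA_LEN)       == 0
 *      size_to_bucket(zb, TDB_MIN_DATA_LEN + 8)   == 1
 *      size_to_bucket(zb, TDB_MIN_DATA_LEN + 64)  == 8
 *      size_to_bucket(zb, TDB_MIN_DATA_LEN + 65)  == fls64(65) + 2 == 9
 *      size_to_bucket(zb, TDB_MIN_DATA_LEN + 200) == fls64(200) + 2 == 10
 *
 * ...always clamped to BUCKETS_FOR_ZONE(zb). */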
/* Binary search for the zone containing this offset. */
static tdb_off_t off_to_zone(struct tdb_context *tdb, tdb_off_t off,
                             struct free_zone_header *zhdr)
{
        tdb_off_t start, end;

        start = sizeof(struct tdb_header);
        end = start + (1ULL << fls64(tdb->map_size - start));

        for (;;) {
                if (tdb_read_convert(tdb, start, zhdr, sizeof(*zhdr)) == -1)
                        return TDB_OFF_ERR;

                /* Is it inside this zone? */
                if (off < start + (1ULL << zhdr->zone_bits))
                        return start;

                /* In practice, start + end won't overflow. */
                if (off >= (start + end) / 2)
                        start = (start + end) / 2;
                else
                        end = (start + end) / 2;
        }
}
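
/* Each probe reads the zone header at `start`; if off lies beyond that zone,
 * we bisect the remaining [start, end) range towards off.  Because no zone
 * crosses a power-of-two boundary (see tdb_expand below), every midpoint is
 * itself a zone start, so the search always lands on a real header. */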
static tdb_off_t last_zone(struct tdb_context *tdb,
                           struct free_zone_header *zhdr)
{
        return off_to_zone(tdb, tdb->map_size - 1, zhdr);
}
int tdb_zone_init(struct tdb_context *tdb)
{
        unsigned int i;
        uint64_t randoff = 0;

        /* We start in a random zone, to spread the load: random() may give
         * fewer than 64 bits, so accumulate fls64(RAND_MAX) bits at a time. */
        for (i = 0; i < 64; i += fls64(RAND_MAX))
                randoff ^= ((uint64_t)random()) << i;
        randoff = sizeof(struct tdb_header)
                + (randoff % (tdb->map_size - sizeof(struct tdb_header)));

        tdb->zone_off = off_to_zone(tdb, randoff, &tdb->zhdr);
        if (tdb->zone_off == TDB_OFF_ERR)
                return -1;
        return 0;
}
/* Where's the header, given a zone size of 1 << zone_bits? */
static tdb_off_t zone_off(tdb_off_t off, unsigned int zone_bits)
{
        off -= sizeof(struct tdb_header);
        return (off & ~((1ULL << zone_bits) - 1)) + sizeof(struct tdb_header);
}
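
/* For example (a hypothetical 64k zone, so zone_bits == 16): an offset of
 * sizeof(struct tdb_header) + 0x1ABCD masks down to 0x10000, so zone_off()
 * returns sizeof(struct tdb_header) + 0x10000, the start of the second
 * 64k zone. */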
/* Offset of a given bucket. */
/* FIXME: bucket can be "unsigned" everywhere, or even uint8/16. */
tdb_off_t bucket_off(tdb_off_t zone_off, tdb_off_t bucket)
{
        return zone_off
                + sizeof(struct free_zone_header)
                + bucket * sizeof(tdb_off_t);
}
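
/* So a zone lays out as: the free_zone_header, then one tdb_off_t list head
 * per bucket (0 through BUCKETS_FOR_ZONE(zone_bits) inclusive), then the
 * records themselves.  E.g. bucket 3's head lives at
 * zone_off + sizeof(struct free_zone_header) + 3 * sizeof(tdb_off_t). */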
/* Returns free_buckets + 1, or list number to search. */
static tdb_off_t find_free_head(struct tdb_context *tdb, tdb_off_t bucket)
{
        /* Speculatively search for a non-zero bucket. */
        return tdb_find_nonzero_off(tdb, bucket_off(tdb->zone_off, 0),
                                    bucket,
                                    BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits) + 1);
}
/* Remove from free bucket. */
static int remove_from_list(struct tdb_context *tdb,
                            tdb_off_t b_off, tdb_off_t r_off,
                            struct tdb_free_record *r)
{
        tdb_off_t off;

        /* Front of list?  Then the bucket head points at us. */
        if (r->prev == 0) {
                off = b_off;
        } else {
                off = r->prev + offsetof(struct tdb_free_record, next);
        }

#ifdef DEBUG
        if (tdb_read_off(tdb, off) != r_off) {
                tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                         "remove_from_list: %llu bad prev in list %llu\n",
                         (long long)r_off, (long long)b_off);
                return -1;
        }
#endif

        /* r->prev->next = r->next */
        if (tdb_write_off(tdb, off, r->next)) {
                return -1;
        }

        if (r->next != 0) {
                off = r->next + offsetof(struct tdb_free_record, prev);
                /* r->next->prev = r->prev */

#ifdef DEBUG
                if (tdb_read_off(tdb, off) != r_off) {
                        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                                 "remove_from_list: %llu bad list %llu\n",
                                 (long long)r_off, (long long)b_off);
                        return -1;
                }
#endif

                if (tdb_write_off(tdb, off, r->prev)) {
                        return -1;
                }
        }
        return 0;
}
/* Enqueue in this free bucket. */
static int enqueue_in_free(struct tdb_context *tdb,
                           tdb_off_t b_off,
                           tdb_off_t off,
                           struct tdb_free_record *new)
{
        new->prev = 0;
        /* new->next = head. */
        new->next = tdb_read_off(tdb, b_off);
        if (new->next == TDB_OFF_ERR)
                return -1;

        if (new->next) {
#ifdef DEBUG
                /* The old head must have prev == 0. */
                if (tdb_read_off(tdb,
                                 new->next
                                 + offsetof(struct tdb_free_record, prev))
                    != 0) {
                        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                                 "enqueue_in_free: %llu bad head prev %llu\n",
                                 (long long)new->next, (long long)b_off);
                        return -1;
                }
#endif
                /* next->prev = new. */
                if (tdb_write_off(tdb, new->next
                                  + offsetof(struct tdb_free_record, prev),
                                  off) != 0)
                        return -1;
        }
        /* head = new. */
        if (tdb_write_off(tdb, b_off, off) != 0)
                return -1;

        return tdb_write_convert(tdb, off, new, sizeof(*new));
}
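
/* The resulting shape of a bucket (offsets, not pointers; 0 terminates):
 *
 *      b_off:  --> new (prev=0) <--> old head <--> ... <--> tail (next=0)
 *
 * New records always go on the front, so each bucket is a LIFO of free
 * records. */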
/* List need not be locked. */
int add_free_record(struct tdb_context *tdb,
                    unsigned int zone_bits,
                    tdb_off_t off, tdb_len_t len_with_header)
{
        struct tdb_free_record new;
        tdb_off_t b_off;
        int ret;

        assert(len_with_header >= sizeof(new));
        assert(zone_bits < 64);

        new.magic_and_meta = TDB_FREE_MAGIC | zone_bits;
        new.data_len = len_with_header - sizeof(struct tdb_used_record);

        b_off = bucket_off(zone_off(off, zone_bits),
                           size_to_bucket(zone_bits, new.data_len));
        if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) != 0)
                return -1;

        ret = enqueue_in_free(tdb, b_off, off, &new);
        tdb_unlock_free_bucket(tdb, b_off);
        return ret;
}
static size_t adjust_size(size_t keylen, size_t datalen, bool want_extra)
{
        size_t size = keylen + datalen;

        /* We want at least 50% growth for data. */
        if (want_extra)
                size += datalen / 2;

        if (size < TDB_MIN_DATA_LEN)
                size = TDB_MIN_DATA_LEN;

        /* Round to next uint64_t boundary. */
        return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}
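
/* For instance (assuming the result stays above TDB_MIN_DATA_LEN):
 * keylen 10, datalen 101 gives 111 bytes; with want_extra that becomes
 * 111 + 101/2 = 161, rounded up to 168 (the next multiple of 8). */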
/* If we have enough left over to be useful, split that off. */
static size_t record_leftover(size_t keylen, size_t datalen,
                              bool want_extra, size_t total_len)
{
        ssize_t leftover;

        /* We might *want* extra, but not have it, so leftover is negative. */
        leftover = total_len - adjust_size(keylen, datalen, want_extra);
        if (leftover < (ssize_t)sizeof(struct tdb_free_record))
                return 0;

        /* If we want extra anyway, don't split unless we have 2x size. */
        if (want_extra && leftover <= datalen / 2)
                return 0;

        return leftover;
}
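
/* Continuing the example above (and assuming sizeof(struct tdb_free_record)
 * is 32): fitting keylen 10, datalen 101 with want_extra into a 300-byte
 * record leaves 300 - 168 = 132 bytes, which is worth splitting off; into a
 * 200-byte record it leaves only 32 <= 101/2, which stays as padding. */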
/* Note: we unlock the current bucket if we coalesce or fail. */
static int coalesce(struct tdb_context *tdb,
                    tdb_off_t zone_off, unsigned zone_bits,
                    tdb_off_t off, tdb_off_t b_off, tdb_len_t data_len)
{
        struct tdb_free_record pad, *r;
        tdb_off_t zone_end, end;

        end = off + sizeof(struct tdb_used_record) + data_len;
        zone_end = zone_off + (1ULL << zone_bits);

        if (tdb->methods->oob(tdb, zone_end, true))
                zone_end = tdb->map_size;

        while (end < zone_end) {
                tdb_off_t nb_off;

                /* FIXME: do tdb_get here and below really win? */
                r = tdb_get(tdb, end, &pad, sizeof(pad));
                if (!r)
                        goto err;

                if (frec_magic(r) != TDB_FREE_MAGIC)
                        break;

                nb_off = bucket_off(zone_off,
                                    size_to_bucket(zone_bits, r->data_len));

                /* We may be violating lock order here, so best effort. */
                if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT) == -1)
                        break;

                /* Now we have lock, re-check. */
                r = tdb_get(tdb, end, &pad, sizeof(pad));
                if (!r) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        goto err;
                }

                if (unlikely(frec_magic(r) != TDB_FREE_MAGIC)) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        break;
                }

                /* Did it change bucket under us?  Leave it alone. */
                if (unlikely(bucket_off(zone_off,
                                        size_to_bucket(zone_bits, r->data_len))
                             != nb_off)) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        break;
                }

                if (remove_from_list(tdb, nb_off, end, r) == -1) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        goto err;
                }

                end += sizeof(struct tdb_used_record) + r->data_len;
                tdb_unlock_free_bucket(tdb, nb_off);
        }

        /* Didn't find any adjacent free? */
        if (end == off + sizeof(struct tdb_used_record) + data_len)
                return 0;

        /* OK, expand record */
        r = tdb_get(tdb, off, &pad, sizeof(pad));
        if (!r)
                goto err;

        if (r->data_len != data_len) {
                tdb->ecode = TDB_ERR_CORRUPT;
                tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                         "coalesce: expected data len %llu not %llu\n",
                         (long long)data_len, (long long)r->data_len);
                goto err;
        }

        if (remove_from_list(tdb, b_off, off, r) == -1)
                goto err;

        r = tdb_access_write(tdb, off, sizeof(*r), true);
        if (!r)
                goto err;

        /* We have to drop this to avoid deadlocks, so make sure record
         * doesn't get coalesced by someone else! */
        r->magic_and_meta = TDB_COALESCING_MAGIC | zone_bits;
        r->data_len = end - off - sizeof(struct tdb_used_record);
        if (tdb_access_commit(tdb, r) != 0)
                goto err;

        tdb_unlock_free_bucket(tdb, b_off);

        if (add_free_record(tdb, zone_bits, off, end - off) == -1)
                return -1;
        return 1;

err:
        /* To unify error paths, we *always* unlock bucket on error. */
        tdb_unlock_free_bucket(tdb, b_off);
        return -1;
}
/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
                                tdb_off_t zone_off,
                                unsigned zone_bits,
                                tdb_off_t bucket,
                                size_t keylen, size_t datalen,
                                bool want_extra,
                                unsigned hashlow)
{
        tdb_off_t off, b_off, best_off;
        struct tdb_free_record pad, best = { 0 }, *r;
        double multiplier;
        size_t size = keylen + datalen;

again:
        b_off = bucket_off(zone_off, bucket);

        /* FIXME: Try non-blocking wait first, to measure contention.
         * If we're contended, try switching zones, and don't enlarge zone
         * next time (we want more zones). */
        /* Lock this bucket. */
        if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == -1) {
                return TDB_OFF_ERR;
        }

        best.data_len = -1ULL;
        best_off = 0;

        /* Get slack if we're after extra. */
        if (want_extra)
                multiplier = 1.5;
        else
                multiplier = 1.0;

        /* Walk the list to see if any are large enough, getting less fussy
         * as we go. */
        off = tdb_read_off(tdb, b_off);
        if (unlikely(off == TDB_OFF_ERR))
                goto unlock_err;

        while (off) {
                /* FIXME: Does tdb_get win anything here? */
                r = tdb_get(tdb, off, &pad, sizeof(*r));
                if (!r)
                        goto unlock_err;

                if (frec_magic(r) != TDB_FREE_MAGIC) {
                        tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                                 "lock_and_alloc: %llu non-free 0x%llx\n",
                                 (long long)off,
                                 (long long)r->magic_and_meta);
                        goto unlock_err;
                }

                if (r->data_len >= size && r->data_len < best.data_len) {
                        best_off = off;
                        best = *r;
                }

                if (best.data_len < size * multiplier && best_off)
                        break;

                multiplier *= 1.01;

                /* Since we're going slow anyway, try coalescing here. */
                switch (coalesce(tdb, zone_off, zone_bits, off, b_off,
                                 r->data_len)) {
                case -1:
                        /* This has already unlocked on error. */
                        return TDB_OFF_ERR;
                case 1:
                        /* This has unlocked list, restart. */
                        goto again;
                }
                off = r->next;
        }

        /* If we found anything at all, use it. */
        if (best_off) {
                struct tdb_used_record rec;
                size_t leftover;

                /* We're happy with this size: take it. */
                if (remove_from_list(tdb, b_off, best_off, &best) != 0)
                        goto unlock_err;

                leftover = record_leftover(keylen, datalen, want_extra,
                                           best.data_len);

                /* We need to mark non-free before we drop lock, otherwise
                 * coalesce() could try to merge it! */
                if (set_header(tdb, &rec, keylen, datalen,
                               best.data_len - leftover,
                               hashlow, zone_bits) != 0)
                        goto unlock_err;

                if (tdb_write_convert(tdb, best_off, &rec, sizeof(rec)) != 0)
                        goto unlock_err;

                tdb_unlock_free_bucket(tdb, b_off);

                if (leftover) {
                        if (add_free_record(tdb, zone_bits,
                                            best_off + sizeof(rec)
                                            + best.data_len - leftover,
                                            leftover))
                                return TDB_OFF_ERR;
                }
                return best_off;
        }

        tdb_unlock_free_bucket(tdb, b_off);
        return 0;

unlock_err:
        tdb_unlock_free_bucket(tdb, b_off);
        return TDB_OFF_ERR;
}
static bool next_zone(struct tdb_context *tdb)
{
        tdb_off_t next = tdb->zone_off + (1ULL << tdb->zhdr.zone_bits);

        /* We must have a header. */
        if (tdb->methods->oob(tdb, next + sizeof(tdb->zhdr), true))
                return false;

        tdb->zone_off = next;
        return tdb_read_convert(tdb, next, &tdb->zhdr, sizeof(tdb->zhdr)) == 0;
}
/* Offset returned is within current zone (which it may alter). */
static tdb_off_t get_free(struct tdb_context *tdb,
                          size_t keylen, size_t datalen, bool want_extra,
                          unsigned hashlow)
{
        tdb_off_t start_zone = tdb->zone_off, off;
        bool wrapped = false;
        size_t size = adjust_size(keylen, datalen, want_extra);

        /* If they are growing, add 50% to get to higher bucket. */
        if (want_extra)
                size += datalen / 2;

        /* FIXME: If we don't get a hit in the first bucket we want,
         * try changing zones for next time.  That should help wear
         * zones evenly, so we don't need to search all of them before
         * expanding. */
        while (!wrapped || tdb->zone_off != start_zone) {
                tdb_off_t b;

                /* Shortcut for really huge allocations: skip zones too
                 * small to ever hold them. */
                if ((size >> tdb->zhdr.zone_bits) == 0) {
                        /* Start at exact size bucket, and search up... */
                        b = size_to_bucket(tdb->zhdr.zone_bits, size);
                        for (b = find_free_head(tdb, b);
                             b <= BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits);
                             b += find_free_head(tdb, b + 1)) {
                                /* Try getting one from list. */
                                off = lock_and_alloc(tdb, tdb->zone_off,
                                                     tdb->zhdr.zone_bits,
                                                     b, keylen, datalen,
                                                     want_extra, hashlow);
                                if (off == TDB_OFF_ERR)
                                        return TDB_OFF_ERR;
                                if (off != 0)
                                        return off;
                                /* Didn't work.  Try next bucket. */
                        }
                }

                /* Didn't work, try next zone, if it exists. */
                if (!next_zone(tdb)) {
                        wrapped = true;
                        tdb->zone_off = sizeof(struct tdb_header);
                        if (tdb_read_convert(tdb, tdb->zone_off,
                                             &tdb->zhdr, sizeof(tdb->zhdr))) {
                                return TDB_OFF_ERR;
                        }
                }
        }
        return 0;
}
int set_header(struct tdb_context *tdb,
               struct tdb_used_record *rec,
               uint64_t keylen, uint64_t datalen,
               uint64_t actuallen, unsigned hashlow,
               unsigned int zone_bits)
{
        uint64_t keybits = (fls64(keylen) + 1) / 2;

        /* Use bottom bits of hash, so it's independent of hash table size. */
        rec->magic_and_meta
                = zone_bits
                | ((hashlow & ((1 << 5)-1)) << 6)
                | ((actuallen - (keylen + datalen)) << 11)
                | (keybits << 43)
                | ((uint64_t)TDB_MAGIC << 48);
        rec->key_and_data_len = (keylen | (datalen << (keybits*2)));

        /* Encoding can fail on big values. */
        if (rec_key_length(rec) != keylen
            || rec_data_length(rec) != datalen
            || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
                tdb->ecode = TDB_ERR_IO;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "Could not encode k=%llu,d=%llu,a=%llu\n",
                         (long long)keylen, (long long)datalen,
                         (long long)actuallen);
                return -1;
        }
        return 0;
}
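
/* So magic_and_meta packs, low bits to high (matching the shifts above):
 *
 *      bits  0-5:  zone_bits
 *      bits  6-10: bottom hash bits
 *      bits 11-42: extra padding (actuallen - keylen - datalen)
 *      bits 43-47: half the bit-length of the key
 *      bits 48-63: magic
 */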
/* FIXME: Actually measure contention; assume uncontended for now. */
static bool zones_contended(struct tdb_context *tdb)
{
        return false;
}
/* Assume we want buckets up to the comfort factor. */
static tdb_len_t overhead(unsigned int zone_bits)
{
        return sizeof(struct free_zone_header)
                + (BUCKETS_FOR_ZONE(zone_bits) + 1) * sizeof(tdb_off_t);
}
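
/* The "+ 1" is because buckets run from 0 to BUCKETS_FOR_ZONE(zone_bits)
 * inclusive, so a zone's overhead is its header plus that many 8-byte
 * (sizeof(tdb_off_t)) list heads. */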
/* Expand the database (by adding a zone). */
static int tdb_expand(struct tdb_context *tdb, tdb_len_t size)
{
        uint64_t old_size;
        tdb_off_t off;
        unsigned int num_buckets, zone_bits;
        tdb_len_t wanted, expand;
        struct free_zone_header zhdr;

        /* We need room for the record header too. */
        wanted = sizeof(struct tdb_used_record) + size;

        /* Only one person can expand file at a time. */
        if (tdb_lock_expand(tdb, F_WRLCK) != 0)
                return -1;

        /* Someone else may have expanded the file, so retry:
         * oob() refreshes tdb->map_size if the file has grown. */
        old_size = tdb->map_size;
        tdb->methods->oob(tdb, tdb->map_size + 1, true);
        if (tdb->map_size != old_size)
                goto success;

        /* Treat last zone as minimum reasonable zone size. */
        off = last_zone(tdb, &zhdr);
        if (off == TDB_OFF_ERR)
                goto fail;

        /* Zone isn't fully expanded? */
        if (tdb->map_size < off + (1ULL << zhdr.zone_bits)) {
                expand = off + (1ULL << zhdr.zone_bits) - tdb->map_size;
                /* Expand more than we want. */
                if (expand > (wanted << TDB_COMFORT_FACTOR_BITS))
                        expand = (wanted << TDB_COMFORT_FACTOR_BITS);
                if (tdb->methods->expand_file(tdb, expand) == -1)
                        goto fail;
                /* We need to drop this lock before adding free record. */
                tdb_unlock_expand(tdb, F_WRLCK);

                /* Allocate from here. */
                tdb->zone_off = off;
                tdb->zhdr = zhdr;

                /* FIXME: If this isn't sufficient, we search again... */
                return add_free_record(tdb, zhdr.zone_bits,
                                       tdb->map_size - expand, expand);
        }

        /* We are never allowed to cross a power-of-two boundary, and our
         * minimum zone size is 1 << INITIAL_ZONE_BITS.
         *
         * If our filesize is 128k, we can add a 64k or a 128k zone.  If it's
         * 192k, we can only add a 64k zone.
         *
         * In other words, our max zone size is (1 << (ffs(filesize) - 1)) */
        zone_bits = ffs64(old_size - sizeof(struct tdb_header)) - 1;
        assert(zone_bits >= INITIAL_ZONE_BITS);

        /* Big zones generally good, but more zones wanted if contended. */
        if (zones_contended(tdb)) {
                /* If it suffices, make zone same size as last one. */
                if (zhdr.zone_bits < zone_bits
                    && (1ULL << zhdr.zone_bits) >= overhead(zone_bits)+wanted)
                        zone_bits = zhdr.zone_bits;
        }

        zhdr.zone_bits = zone_bits;
        num_buckets = BUCKETS_FOR_ZONE(zone_bits);

        /* Expand the file by more than we need right now. */
        expand = 1ULL << zone_bits;
        if (expand > overhead(zone_bits) + (wanted << TDB_COMFORT_FACTOR_BITS))
                expand = overhead(zone_bits)
                        + (wanted << TDB_COMFORT_FACTOR_BITS);

        if (tdb->methods->expand_file(tdb, expand) == -1)
                goto fail;

        /* Write new zone header (at old end). */
        off = old_size;
        if (tdb_write_convert(tdb, off, &zhdr, sizeof(zhdr)) == -1)
                goto fail;

        /* Now write empty buckets. */
        off += sizeof(zhdr);
        if (zero_out(tdb, off, (num_buckets+1) * sizeof(tdb_off_t)) == -1)
                goto fail;
        off += (num_buckets+1) * sizeof(tdb_off_t);

        /* Now add the rest as our free record. */
        if (add_free_record(tdb, zone_bits, off, expand - overhead(zone_bits))
            == -1)
                goto fail;

        /* Try allocating from this zone now. */
        tdb->zone_off = old_size;
        tdb->zhdr = zhdr;

success:
        tdb_unlock_expand(tdb, F_WRLCK);
        return 0;

fail:
        tdb_unlock_expand(tdb, F_WRLCK);
        return -1;
}
/* This won't fail: it will expand the database if it has to. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
                uint64_t hash, bool growing)
{
        tdb_off_t off;

        /* We can't hold pointers during this: we could unmap! */
        assert(!tdb->direct_access);

        for (;;) {
                off = get_free(tdb, keylen, datalen, growing, hash);
                if (likely(off != 0))
                        break;

                if (tdb_expand(tdb, adjust_size(keylen, datalen, growing)))
                        return TDB_OFF_ERR;
        }

        return off;
}
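
/* A sketch of the caller's side (hypothetical variable names; real callers
 * pass their actual key/data sizes and hash value):
 *
 *      off = alloc(tdb, key.dsize, data.dsize, h, false);
 *      if (off == TDB_OFF_ERR)
 *              ...fail...
 *      ...write key then data at off + sizeof(struct tdb_used_record)...
 */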