/*
   Trivial Database 2: free list/block handling
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <ccan/likely/likely.h>
#include <ccan/ilog/ilog.h>
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>
/* "Find last set": 1-based index of the highest set bit (0 for 0). */
static unsigned fls64(uint64_t val)
{
	return ilog64(val);
}
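/*
 * Illustrative only (not part of the original file): a few concrete
 * values, since ilog64() returns the number of bits needed to
 * represent its argument.  TDB_FREE_EXAMPLES is a hypothetical guard
 * so these sketches stay out of normal builds.
 */
#ifdef TDB_FREE_EXAMPLES
static void fls64_examples(void)
{
	assert(fls64(0) == 0);
	assert(fls64(1) == 1);	/* 0b1 needs one bit. */
	assert(fls64(3) == 2);	/* 0b11 needs two bits. */
	assert(fls64(64) == 7);	/* 0b1000000 needs seven bits. */
}
#endif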
/* In which bucket would we find a particular record size? (ignoring header) */
unsigned int size_to_bucket(unsigned int zone_bits, tdb_len_t data_len)
{
	unsigned int bucket;

	/* We can't have records smaller than this. */
	assert(data_len >= TDB_MIN_DATA_LEN);

	/* Ignoring the header... */
	if (data_len - TDB_MIN_DATA_LEN <= 64) {
		/* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
		bucket = (data_len - TDB_MIN_DATA_LEN) / 8;
	} else {
		/* After that we go power of 2. */
		bucket = fls64(data_len - TDB_MIN_DATA_LEN) + 2;
	}

	if (unlikely(bucket > BUCKETS_FOR_ZONE(zone_bits)))
		bucket = BUCKETS_FOR_ZONE(zone_bits);
	return bucket;
}
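/*
 * Illustrative only: how sizes map to buckets.  The first 8 buckets are
 * linear in 8-byte steps; past 64 bytes over the minimum, the bucket
 * index grows with the log of the excess.  (Assumes BUCKETS_FOR_ZONE(20)
 * is at least 10 so no clamping occurs.)
 */
#ifdef TDB_FREE_EXAMPLES
static void size_to_bucket_examples(void)
{
	assert(size_to_bucket(20, TDB_MIN_DATA_LEN) == 0);
	assert(size_to_bucket(20, TDB_MIN_DATA_LEN + 8) == 1);
	assert(size_to_bucket(20, TDB_MIN_DATA_LEN + 64) == 8);
	/* fls64(65) == 7, so bucket 9; fls64(128) == 8, so bucket 10. */
	assert(size_to_bucket(20, TDB_MIN_DATA_LEN + 65) == 9);
	assert(size_to_bucket(20, TDB_MIN_DATA_LEN + 128) == 10);
}
#endif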
/* Subtract 1-byte tailer and header.  Then round up to next power of 2. */
static unsigned max_zone_bits(struct tdb_context *tdb)
{
	return fls64(tdb->map_size - 1 - sizeof(struct tdb_header) - 1) + 1;
}
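/*
 * Illustrative only: for a file holding one 2^20-byte zone (header +
 * zone + 1-byte tailer), the argument above is 2^20 - 1, whose fls64()
 * is 20, so max_zone_bits is 21: one more than the largest zone that
 * could currently fit.  random_zone() below therefore starts its
 * descent at half_bits == 20, exactly the existing zone's size.
 */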
/* Start by using a random zone to spread the load: returns the offset. */
static uint64_t random_zone(struct tdb_context *tdb)
{
	struct free_zone_header zhdr;
	tdb_off_t off = sizeof(struct tdb_header);
	unsigned int half_bits, i;
	uint64_t randbits = 0;

	for (i = 0; i < 64; i += fls64(RAND_MAX))
		randbits ^= ((uint64_t)random()) << i;

	/* FIXME: Does this work?  Test! */
	half_bits = max_zone_bits(tdb) - 1;
	do {
		/* Pick left or right side (not outside file) */
		if ((randbits & 1)
		    && !tdb->methods->oob(tdb, off + (1ULL << half_bits)
					  + sizeof(zhdr), true)) {
			off += 1ULL << half_bits;
		}
		randbits >>= 1;

		if (tdb_read_convert(tdb, off, &zhdr, sizeof(zhdr)) == -1)
			return TDB_OFF_ERR;

		if (zhdr.zone_bits == half_bits)
			return off;
		half_bits--;
	} while (half_bits >= INITIAL_ZONE_BITS);

	tdb->ecode = TDB_ERR_CORRUPT;
	tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
		 "random_zone: zone at %llu smaller than %u bits?",
		 (long long)off, INITIAL_ZONE_BITS);
	return TDB_OFF_ERR;
}
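/*
 * Illustrative only: with the common 31-bit RAND_MAX, fls64(RAND_MAX)
 * is 31, so the loop above XORs three random() results (shifted by 0,
 * 31 and 62 bits) to fill all 64 bits of randbits; each descent step
 * then consumes one bit to choose the left or right half of the
 * remaining range.
 */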
int tdb_zone_init(struct tdb_context *tdb)
{
	tdb->zone_off = random_zone(tdb);
	if (tdb->zone_off == TDB_OFF_ERR)
		return -1;
	if (tdb_read_convert(tdb, tdb->zone_off,
			     &tdb->zhdr, sizeof(tdb->zhdr)) == -1)
		return -1;
	return 0;
}
/* Where's the header, given a zone size of 1 << zone_bits? */
static tdb_off_t zone_off(tdb_off_t off, unsigned int zone_bits)
{
	off -= sizeof(struct tdb_header);
	return (off & ~((1ULL << zone_bits) - 1)) + sizeof(struct tdb_header);
}
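/*
 * Illustrative only: zones are sized and aligned relative to the end
 * of the file header, so with zone_bits == 20, an offset of
 * sizeof(struct tdb_header) + 0x1F2345 masks down to the zone starting
 * at sizeof(struct tdb_header) + 0x100000.
 */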
/* Offset of a given bucket. */
/* FIXME: bucket can be "unsigned" everywhere, or even uint8/16. */
tdb_off_t bucket_off(tdb_off_t zone_off, tdb_off_t bucket)
{
	return zone_off
		+ sizeof(struct free_zone_header)
		+ bucket * sizeof(tdb_off_t);
}
/* Returns free_buckets + 1, or list number to search. */
static tdb_off_t find_free_head(struct tdb_context *tdb, tdb_off_t bucket)
{
	/* Speculatively search for a non-zero bucket. */
	return tdb_find_nonzero_off(tdb, bucket_off(tdb->zone_off, 0),
				    bucket,
				    BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits) + 1);
}
/* Remove from free bucket. */
static int remove_from_list(struct tdb_context *tdb,
			    tdb_off_t b_off, tdb_off_t r_off,
			    struct tdb_free_record *r)
{
	tdb_off_t off;

	/* Front of list?  Then the bucket head points at us. */
	if (r->prev == 0)
		off = b_off;
	else
		off = r->prev + offsetof(struct tdb_free_record, next);

	/* Sanity check: it must point back at us. */
	if (tdb_read_off(tdb, off) != r_off) {
		tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
			 "remove_from_list: %llu bad prev in list %llu\n",
			 (long long)r_off, (long long)b_off);
		return -1;
	}

	/* r->prev->next = r->next */
	if (tdb_write_off(tdb, off, r->next)) {
		return -1;
	}

	if (r->next != 0) {
		off = r->next + offsetof(struct tdb_free_record, prev);
		/* r->next->prev = r->prev */
		if (tdb_read_off(tdb, off) != r_off) {
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "remove_from_list: %llu bad list %llu\n",
				 (long long)r_off, (long long)b_off);
			return -1;
		}

		if (tdb_write_off(tdb, off, r->prev)) {
			return -1;
		}
	}
	return 0;
}
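/*
 * Illustrative only: the same unlink done on an in-memory doubly
 * linked list.  On disk, "prev == 0" plays the role of the NULL prev
 * pointer and the bucket head slot plays the role of *head.
 */
#ifdef TDB_FREE_EXAMPLES
struct example_node {
	struct example_node *prev, *next;
};

static void example_unlink(struct example_node **head,
			   struct example_node *n)
{
	if (n->prev)
		n->prev->next = n->next;
	else
		*head = n->next;
	if (n->next)
		n->next->prev = n->prev;
}
#endif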
/* Enqueue in this free bucket. */
static int enqueue_in_free(struct tdb_context *tdb,
			   tdb_off_t b_off,
			   tdb_off_t off,
			   struct tdb_free_record *new)
{
	new->prev = 0;
	/* new->next = head. */
	new->next = tdb_read_off(tdb, b_off);
	if (new->next == TDB_OFF_ERR)
		return -1;

	if (new->next) {
		/* Sanity check: the old head must have no prev. */
		if (tdb_read_off(tdb,
				 new->next
				 + offsetof(struct tdb_free_record, prev))
		    != 0) {
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "enqueue_in_free: %llu bad head prev %llu\n",
				 (long long)new->next, (long long)b_off);
			return -1;
		}

		/* next->prev = new. */
		if (tdb_write_off(tdb, new->next
				  + offsetof(struct tdb_free_record, prev),
				  off) != 0)
			return -1;
	}

	/* head = new. */
	if (tdb_write_off(tdb, b_off, off) != 0)
		return -1;

	return tdb_write_convert(tdb, off, new, sizeof(*new));
}
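/*
 * Illustrative only: the in-memory shape of the insert above is a
 * plain push-front onto a doubly linked list (reusing struct
 * example_node from the unlink sketch earlier).
 */
#ifdef TDB_FREE_EXAMPLES
static void example_push_front(struct example_node **head,
			       struct example_node *n)
{
	n->prev = NULL;
	n->next = *head;
	if (*head)
		(*head)->prev = n;
	*head = n;
}
#endif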
/* List need not be locked. */
int add_free_record(struct tdb_context *tdb,
		    unsigned int zone_bits,
		    tdb_off_t off, tdb_len_t len_with_header)
{
	struct tdb_free_record new;
	tdb_off_t b_off;
	int ret;

	assert(len_with_header >= sizeof(new));
	assert(zone_bits < (1 << 6));

	new.magic_and_meta = TDB_FREE_MAGIC | zone_bits;
	new.data_len = len_with_header - sizeof(struct tdb_used_record);

	b_off = bucket_off(zone_off(off, zone_bits),
			   size_to_bucket(zone_bits, new.data_len));
	if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) != 0)
		return -1;

	ret = enqueue_in_free(tdb, b_off, off, &new);
	tdb_unlock_free_bucket(tdb, b_off);
	return ret;
}
static size_t adjust_size(size_t keylen, size_t datalen, bool want_extra)
{
	size_t size = keylen + datalen;

	/* We want at least 50% growth for data. */
	if (want_extra)
		size += datalen / 2;

	if (size < TDB_MIN_DATA_LEN)
		size = TDB_MIN_DATA_LEN;

	/* Round to next uint64_t boundary. */
	return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}
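/*
 * Illustrative only (assuming TDB_MIN_DATA_LEN is below these sizes):
 * a 16-byte key with 30 bytes of data wants 46 bytes, or 46 + 15 = 61
 * with the 50% growth slack; rounding to the next uint64_t boundary
 * gives 48 and 64 respectively.
 */
#ifdef TDB_FREE_EXAMPLES
static void adjust_size_examples(void)
{
	assert(adjust_size(16, 30, false) == 48);
	assert(adjust_size(16, 30, true) == 64);
}
#endif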
/* If we have enough left over to be useful, split that off. */
static size_t record_leftover(size_t keylen, size_t datalen,
			      bool want_extra, size_t total_len)
{
	ssize_t leftover;

	/* We might *want* extra, but not have it, so leftover is negative. */
	leftover = total_len - adjust_size(keylen, datalen, want_extra);
	if (leftover < (ssize_t)sizeof(struct tdb_free_record))
		return 0;

	/* If we want extra anyway, don't split unless we have 2x size. */
	if (want_extra && leftover <= datalen / 2)
		return 0;

	return leftover;
}
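/*
 * Illustrative only: if adjust_size() wants 64 bytes and the chosen
 * free record holds 200, the 136-byte remainder comfortably exceeds
 * sizeof(struct tdb_free_record), so 136 is returned and the caller
 * splits it off as a new free record; anything smaller simply stays
 * as padding inside the allocated record.
 */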
/* Note: we unlock the current bucket if we coalesce or fail. */
static int coalesce(struct tdb_context *tdb,
		    tdb_off_t zone_off, unsigned zone_bits,
		    tdb_off_t off, tdb_off_t b_off, tdb_len_t data_len)
{
	struct tdb_free_record pad, *r;
	tdb_off_t end = off + sizeof(struct tdb_used_record) + data_len;

	while (end < (zone_off + (1ULL << zone_bits))) {
		tdb_off_t nb_off;

		/* FIXME: do tdb_get here and below really win? */
		r = tdb_get(tdb, end, &pad, sizeof(pad));
		if (!r)
			goto err;

		if (frec_magic(r) != TDB_FREE_MAGIC)
			break;

		nb_off = bucket_off(zone_off,
				    size_to_bucket(zone_bits, r->data_len));

		/* We may be violating lock order here, so best effort. */
		if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT) == -1)
			break;

		/* Now we have lock, re-check. */
		r = tdb_get(tdb, end, &pad, sizeof(pad));
		if (!r) {
			tdb_unlock_free_bucket(tdb, nb_off);
			goto err;
		}

		if (unlikely(frec_magic(r) != TDB_FREE_MAGIC)) {
			tdb_unlock_free_bucket(tdb, nb_off);
			break;
		}

		/* Did it change bucket (ie. get allocated and re-freed)? */
		if (unlikely(bucket_off(zone_off,
					size_to_bucket(zone_bits, r->data_len))
			     != nb_off)) {
			tdb_unlock_free_bucket(tdb, nb_off);
			break;
		}

		if (remove_from_list(tdb, nb_off, end, r) == -1) {
			tdb_unlock_free_bucket(tdb, nb_off);
			goto err;
		}

		end += sizeof(struct tdb_used_record) + r->data_len;
		tdb_unlock_free_bucket(tdb, nb_off);
	}

	/* Didn't find any adjacent free? */
	if (end == off + sizeof(struct tdb_used_record) + data_len)
		return 0;

	/* OK, expand record */
	r = tdb_get(tdb, off, &pad, sizeof(pad));
	if (!r)
		goto err;

	if (r->data_len != data_len) {
		tdb->ecode = TDB_ERR_CORRUPT;
		tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
			 "coalesce: expected data len %llu not %llu\n",
			 (long long)data_len, (long long)r->data_len);
		goto err;
	}

	if (remove_from_list(tdb, b_off, off, r) == -1)
		goto err;

	r = tdb_access_write(tdb, off, sizeof(*r), true);
	if (!r)
		goto err;

	/* We have to drop this to avoid deadlocks, so make sure record
	 * doesn't get coalesced by someone else! */
	r->magic_and_meta = TDB_COALESCING_MAGIC | zone_bits;
	r->data_len = end - off - sizeof(struct tdb_used_record);
	if (tdb_access_commit(tdb, r) != 0)
		goto err;

	tdb_unlock_free_bucket(tdb, b_off);

	if (add_free_record(tdb, zone_bits, off, end - off) == -1)
		return -1;
	return 1;

err:
	/* To unify error paths, we *always* unlock bucket on error. */
	tdb_unlock_free_bucket(tdb, b_off);
	return -1;
}
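/*
 * Illustrative only: the length arithmetic of a merge.  A free record
 * at off covering D data bytes ends at
 * off + sizeof(struct tdb_used_record) + D; if its neighbour there is
 * also free with D' data bytes, the merged record's data length becomes
 * D + sizeof(struct tdb_used_record) + D', i.e. the neighbour's header
 * is swallowed too.
 */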
/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
				tdb_off_t zone_off,
				unsigned zone_bits,
				tdb_off_t bucket,
				size_t keylen, size_t datalen,
				bool want_extra,
				unsigned hashlow)
{
	tdb_off_t off, b_off, best_off;
	struct tdb_free_record pad, best = { 0 }, *r;
	double multiplier;
	size_t size = keylen + datalen;

again:
	b_off = bucket_off(zone_off, bucket);

	/* FIXME: Try non-blocking wait first, to measure contention.
	 * If we're contended, try switching zones, and don't enlarge zone
	 * next time (we want more zones). */
	/* Lock this bucket. */
	if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == -1) {
		return TDB_OFF_ERR;
	}

	best.data_len = -1ULL;
	best_off = 0;

	/* Get slack if we're after extra. */
	if (want_extra)
		multiplier = 1.5;
	else
		multiplier = 1.0;

	/* Walk the list to see if any are large enough, getting less fussy
	 * as we go. */
	off = tdb_read_off(tdb, b_off);
	if (unlikely(off == TDB_OFF_ERR))
		goto unlock_err;

	while (off) {
		/* FIXME: Does tdb_get win anything here? */
		r = tdb_get(tdb, off, &pad, sizeof(*r));
		if (!r)
			goto unlock_err;

		if (frec_magic(r) != TDB_FREE_MAGIC) {
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "lock_and_alloc: %llu non-free 0x%llx\n",
				 (long long)off, (long long)r->magic_and_meta);
			goto unlock_err;
		}

		if (r->data_len >= size && r->data_len < best.data_len) {
			best_off = off;
			best = *r;
		}

		if (best.data_len < size * multiplier && best_off)
			goto use_best;

		multiplier *= 1.01;

		/* Since we're going slow anyway, try coalescing here. */
		switch (coalesce(tdb, zone_off, zone_bits, off, b_off,
				 r->data_len)) {
		case -1:
			/* This has already unlocked on error. */
			return TDB_OFF_ERR;
		case 1:
			/* This has unlocked list, restart. */
			goto again;
		}
		off = r->next;
	}

	/* If we found anything at all, use it. */
use_best:
	if (best_off) {
		struct tdb_used_record rec;
		size_t leftover;

		/* We're happy with this size: take it. */
		if (remove_from_list(tdb, b_off, best_off, &best) != 0)
			goto unlock_err;

		leftover = record_leftover(keylen, datalen, want_extra,
					   best.data_len);

		/* We need to mark non-free before we drop lock, otherwise
		 * coalesce() could try to merge it! */
		if (set_header(tdb, &rec, keylen, datalen,
			       best.data_len - leftover,
			       hashlow, zone_bits) != 0)
			goto unlock_err;

		if (tdb_write_convert(tdb, best_off, &rec, sizeof(rec)) != 0)
			goto unlock_err;

		tdb_unlock_free_bucket(tdb, b_off);

		if (leftover) {
			if (add_free_record(tdb, zone_bits,
					    best_off + sizeof(rec)
					    + best.data_len - leftover,
					    leftover))
				return TDB_OFF_ERR;
		}
		return best_off;
	}

	tdb_unlock_free_bucket(tdb, b_off);
	return 0;

unlock_err:
	tdb_unlock_free_bucket(tdb, b_off);
	return TDB_OFF_ERR;
}
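/*
 * Illustrative only: the walk above is a bounded best-fit.  Searching
 * for 100 bytes with want_extra set, it initially accepts only a best
 * record under 150 bytes (multiplier 1.5); each node visited loosens
 * that bound by 1% (multiplier *= 1.01), so long lists degrade
 * gracefully toward taking whatever fits.
 */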
static bool next_zone(struct tdb_context *tdb)
{
	tdb_off_t next = tdb->zone_off + (1ULL << tdb->zhdr.zone_bits);

	/* We must have a header. */
	if (tdb->methods->oob(tdb, next + sizeof(tdb->zhdr), true))
		return false;

	tdb->zone_off = next;
	return tdb_read_convert(tdb, next, &tdb->zhdr, sizeof(tdb->zhdr)) == 0;
}
/* Offset returned is within current zone (which it may alter). */
static tdb_off_t get_free(struct tdb_context *tdb,
			  size_t keylen, size_t datalen, bool want_extra,
			  unsigned hashlow)
{
	tdb_off_t start_zone = tdb->zone_off, off;
	bool wrapped = false;
	size_t size = adjust_size(keylen, datalen, want_extra);

	/* If they are growing, add 50% to get to higher bucket. */
	if (want_extra)
		size += datalen / 2;

	/* FIXME: If we don't get a hit in the first bucket we want,
	 * try changing zones for next time.  That should help wear
	 * zones evenly, so we don't need to search all of them before
	 * expanding. */
	while (!wrapped || tdb->zone_off != start_zone) {
		tdb_off_t b;

		/* Shortcut for really huge allocations... */
		if ((size >> tdb->zhdr.zone_bits) != 0)
			goto next;

		/* Start at exact size bucket, and search up... */
		b = size_to_bucket(tdb->zhdr.zone_bits, size);
		for (b = find_free_head(tdb, b);
		     b <= BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits);
		     b += find_free_head(tdb, b + 1)) {
			/* Try getting one from list. */
			off = lock_and_alloc(tdb, tdb->zone_off,
					     tdb->zhdr.zone_bits,
					     b, keylen, datalen, want_extra,
					     hashlow);
			if (off == TDB_OFF_ERR)
				return TDB_OFF_ERR;
			if (off != 0)
				return off;
			/* Didn't work.  Try next bucket. */
		}

	next:
		/* Didn't work, try next zone, if it exists. */
		if (!next_zone(tdb)) {
			wrapped = true;
			tdb->zone_off = sizeof(struct tdb_header);
			if (tdb_read_convert(tdb, tdb->zone_off,
					     &tdb->zhdr, sizeof(tdb->zhdr))) {
				return TDB_OFF_ERR;
			}
		}
	}
	return 0;
}
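/*
 * Illustrative only: the overall allocation order is (1) the exact-size
 * bucket in the current zone, (2) successively larger buckets there,
 * (3) each following zone in turn, wrapping once back to the first
 * zone, and only then (4) file expansion via tdb_expand(), driven by
 * alloc() below when get_free() returns 0.
 */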
int set_header(struct tdb_context *tdb,
	       struct tdb_used_record *rec,
	       uint64_t keylen, uint64_t datalen,
	       uint64_t actuallen, unsigned hashlow,
	       unsigned int zone_bits)
{
	uint64_t keybits = (fls64(keylen) + 1) / 2;

	/* Use bottom bits of hash, so it's independent of hash table size. */
	rec->magic_and_meta = zone_bits
		| ((hashlow & ((1 << 5) - 1)) << 6)
		| ((actuallen - (keylen + datalen)) << 11)
		| (keybits << 43)
		| ((uint64_t)TDB_MAGIC << 48);
	rec->key_and_data_len = (keylen | (datalen << (keybits * 2)));

	/* Encoding can fail on big values. */
	if (rec_key_length(rec) != keylen
	    || rec_data_length(rec) != datalen
	    || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
		tdb->ecode = TDB_ERR_IO;
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "Could not encode k=%llu,d=%llu,a=%llu\n",
			 (long long)keylen, (long long)datalen,
			 (long long)actuallen);
		return -1;
	}
	return 0;
}
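/*
 * For reference, the magic_and_meta layout implied by the shifts above
 * (the keybits and magic positions are reconstructed, not verbatim):
 *
 *   bits  0-5   zone_bits
 *   bits  6-10  bottom 5 bits of the hash
 *   bits 11-42  extra padding (actuallen - keylen - datalen)
 *   bits 43-47  keybits (half the width of the key-length field)
 *   bits 48-63  magic
 *
 * key_and_data_len packs keylen into the bottom keybits*2 bits and
 * datalen above it, which is why oversized values can fail to encode
 * and must be caught by the re-read check.
 */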
static bool zones_happy(struct tdb_context *tdb)
{
	/* FIXME: look at distribution of zones. */
	return true;
}
/* Assume we want buckets up to the comfort factor. */
static tdb_len_t overhead(unsigned int zone_bits)
{
	return sizeof(struct free_zone_header)
		+ (BUCKETS_FOR_ZONE(zone_bits) + 1) * sizeof(tdb_off_t);
}
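/*
 * Illustrative only: each zone pays for its header plus one 8-byte
 * bucket head per bucket (and one extra).  If BUCKETS_FOR_ZONE(20)
 * were 22, say, a 2^20-byte zone would spend
 * sizeof(struct free_zone_header) + 23 * 8 bytes on free-list
 * bookkeeping.
 */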
/* Expand the database (by adding a zone). */
static int tdb_expand(struct tdb_context *tdb, tdb_len_t size)
{
	uint64_t old_size;
	tdb_off_t off;
	uint8_t zone_bits;
	unsigned int num_buckets;
	tdb_len_t wanted;
	struct free_zone_header zhdr;
	bool enlarge_zone;

	/* We need room for the record header too. */
	wanted = sizeof(struct tdb_used_record) + size;

	/* Only one person can expand file at a time. */
	if (tdb_lock_expand(tdb, F_WRLCK) != 0)
		return -1;

	/* Someone else may have expanded the file, so retry. */
	old_size = tdb->map_size;
	tdb->methods->oob(tdb, tdb->map_size + 1, true);
	if (tdb->map_size != old_size)
		goto success;

	/* FIXME: Tailer is a bogus optimization, remove it. */
	/* zone bits tailer char is protected by EXPAND lock. */
	if (tdb->methods->read(tdb, old_size - 1, &zone_bits, 1) == -1)
		goto fail;

	/* If zones aren't working well, add larger zone if possible. */
	enlarge_zone = !zones_happy(tdb);

	/* New zone can be the same zone_bits, or larger if we're on the
	 * right boundary. */
	for (;;) {
		/* Does this fit the allocation comfortably? */
		if ((1ULL << zone_bits) >= overhead(zone_bits) + wanted) {
			/* Only let enlarge_zone enlarge us once. */
			if (!enlarge_zone)
				break;
			enlarge_zone = false;
		}
		/* Can only grow if the new zone would stay aligned. */
		if ((old_size - 1 - sizeof(struct tdb_header))
		    & ((1ULL << (zone_bits + 1)) - 1))
			break;
		zone_bits++;
	}

	zhdr.zone_bits = zone_bits;
	num_buckets = BUCKETS_FOR_ZONE(zone_bits);

	/* FIXME: I don't think we need to expand to full zone, do we? */
	if (tdb->methods->expand_file(tdb, 1ULL << zone_bits) == -1)
		goto fail;

	/* Write new tailer. */
	if (tdb->methods->write(tdb, tdb->map_size - 1, &zone_bits, 1) == -1)
		goto fail;

	/* Write new zone header (just before old tailer). */
	off = old_size - 1;
	if (tdb_write_convert(tdb, off, &zhdr, sizeof(zhdr)) == -1)
		goto fail;

	/* Now write empty buckets. */
	off += sizeof(zhdr);
	if (zero_out(tdb, off, (num_buckets + 1) * sizeof(tdb_off_t)) == -1)
		goto fail;
	off += (num_buckets + 1) * sizeof(tdb_off_t);

	/* Now add the rest as our free record. */
	if (add_free_record(tdb, zone_bits, off, tdb->map_size - 1 - off) == -1)
		goto fail;

	/* Try allocating from this zone now. */
	tdb->zone_off = old_size - 1;
	tdb->zhdr = zhdr;

success:
	tdb_unlock_expand(tdb, F_WRLCK);
	return 0;

fail:
	tdb_unlock_expand(tdb, F_WRLCK);
	return -1;
}
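/*
 * Illustrative only: on-disk layout after an expand.  The new zone
 * header lands where the old 1-byte tailer sat, followed by the
 * (num_buckets + 1) zeroed bucket heads, one big free record covering
 * the remainder, and a fresh tailer byte at the new end of file:
 *
 *   [tdb_header][old zones...][zhdr][bucket heads][free record][tailer]
 */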
/* This won't fail: it will expand the database if it has to. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
		uint64_t hash, bool growing)
{
	tdb_off_t off;

	/* We can't hold pointers during this: we could unmap! */
	assert(!tdb->direct_access);

again:
	off = get_free(tdb, keylen, datalen, growing, hash);
	if (likely(off != 0))
		return off;

	if (tdb_expand(tdb, adjust_size(keylen, datalen, growing)))
		return TDB_OFF_ERR;
	goto again;
}
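/*
 * Illustrative only (a hypothetical caller, not from this file): a
 * user of alloc() reserves header + key + data, then writes the key
 * and data just past the struct tdb_used_record that lock_and_alloc()
 * already wrote via set_header()/tdb_write_convert().  example_store
 * and its use of struct tdb_data are assumptions for the sketch.
 */
#ifdef TDB_FREE_EXAMPLES
static int example_store(struct tdb_context *tdb,
			 struct tdb_data key, struct tdb_data dbuf,
			 uint64_t hash)
{
	tdb_off_t off = alloc(tdb, key.dsize, dbuf.dsize, hash, false);
	if (off == TDB_OFF_ERR)
		return -1;

	/* Record header is already in place; append key, then data. */
	if (tdb->methods->write(tdb, off + sizeof(struct tdb_used_record),
				key.dptr, key.dsize) == -1)
		return -1;
	return tdb->methods->write(tdb,
				   off + sizeof(struct tdb_used_record)
				   + key.dsize,
				   dbuf.dptr, dbuf.dsize);
}
#endif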