/*
  Trivial Database 2: free list/block handling
  Copyright (C) Rusty Russell 2010

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 3 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <ccan/likely/likely.h>
#include <ccan/ilog/ilog.h>
#include <assert.h>

/* Number of bits needed to represent val: ilog64(8) == 4, ilog64(0) == 0. */
static unsigned fls64(uint64_t val)
{
	return ilog64(val);
}

/* In which bucket would we find a particular record size? (ignoring header) */
unsigned int size_to_bucket(tdb_len_t data_len)
{
	unsigned int bucket;

	/* We can't have records smaller than this. */
	assert(data_len >= TDB_MIN_DATA_LEN);

	/* Ignoring the header... */
	if (data_len - TDB_MIN_DATA_LEN <= 64) {
		/* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
		bucket = (data_len - TDB_MIN_DATA_LEN) / 8;
	} else {
		/* After that we go power of 2. */
		bucket = fls64(data_len - TDB_MIN_DATA_LEN) + 2;
	}

	/* Anything too big lands in the last bucket. */
	if (unlikely(bucket >= TDB_FREE_BUCKETS))
		bucket = TDB_FREE_BUCKETS - 1;
	return bucket;
}
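
/*
 * A sketch of the mapping above (illustration only, compiled out; the
 * TDB2_FREE_EXAMPLES guard is hypothetical, not part of tdb2):
 */
#ifdef TDB2_FREE_EXAMPLES
static void example_size_to_bucket(void)
{
	/* The smallest record lands in bucket 0... */
	assert(size_to_bucket(TDB_MIN_DATA_LEN) == 0);
	/* ...each extra 8 bytes moves up one bucket, up to bucket 8... */
	assert(size_to_bucket(TDB_MIN_DATA_LEN + 8) == 1);
	assert(size_to_bucket(TDB_MIN_DATA_LEN + 64) == 8);
	/* ...after which each bucket covers a power-of-2 range. */
	assert(size_to_bucket(TDB_MIN_DATA_LEN + 65) == fls64(65) + 2);
}
#endif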

tdb_off_t first_ftable(struct tdb_context *tdb)
{
	return tdb_read_off(tdb, offsetof(struct tdb_header, free_table));
}

tdb_off_t next_ftable(struct tdb_context *tdb, tdb_off_t ftable)
{
	return tdb_read_off(tdb, ftable + offsetof(struct tdb_freetable, next));
}

enum TDB_ERROR tdb_ftable_init(struct tdb_context *tdb)
{
	/* Use reservoir sampling algorithm to select a free list at random. */
	unsigned int rnd, max = 0, count = 0;
	tdb_off_t off;

	tdb->ftable_off = off = first_ftable(tdb);
	tdb->ftable = 0;

	while (off) {
		if (TDB_OFF_IS_ERR(off)) {
			return off;
		}

		rnd = random();
		if (rnd >= max) {
			tdb->ftable_off = off;
			tdb->ftable = count;
			max = rnd;
		}

		off = next_ftable(tdb, off);
		count++;
	}
	return TDB_SUCCESS;
}
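
/*
 * The "rnd >= max" trick is reservoir sampling with a reservoir of one:
 * each list keeps the maximum random draw seen so far, so each of the N
 * lists wins with probability ~1/N. A standalone sketch of the same idea
 * (the helper name is hypothetical):
 */
#ifdef TDB2_FREE_EXAMPLES
static unsigned int example_pick_one_of(unsigned int nitems)
{
	unsigned int i, pick = 0, max = 0, rnd;

	for (i = 0; i < nitems; i++) {
		rnd = random();
		if (rnd >= max) {
			max = rnd;
			pick = i;
		}
	}
	return pick;
}
#endif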

/* Offset of a given bucket. */
tdb_off_t bucket_off(tdb_off_t ftable_off, unsigned bucket)
{
	return ftable_off + offsetof(struct tdb_freetable, buckets)
		+ bucket * sizeof(tdb_off_t);
}

/* Returns free_buckets + 1, or list number to search, or -ve error. */
static tdb_off_t find_free_head(struct tdb_context *tdb,
				tdb_off_t ftable_off,
				tdb_off_t bucket)
{
	/* Speculatively search for a non-zero bucket. */
	return tdb_find_nonzero_off(tdb, bucket_off(ftable_off, 0),
				    bucket, TDB_FREE_BUCKETS);
}

/* Remove from free bucket. */
static enum TDB_ERROR remove_from_list(struct tdb_context *tdb,
				       tdb_off_t b_off, tdb_off_t r_off,
				       const struct tdb_free_record *r)
{
	tdb_off_t off;
	enum TDB_ERROR ecode;

	/* Front of list?  Then the bucket head points at us. */
	if (frec_prev(r) == 0) {
		off = b_off;
	} else {
		off = frec_prev(r) + offsetof(struct tdb_free_record, next);
	}

#ifdef CCAN_TDB2_DEBUG
	if (tdb_read_off(tdb, off) != r_off) {
		return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
				  "remove_from_list:"
				  " %llu bad prev in list %llu",
				  (long long)r_off, (long long)b_off);
	}
#endif

	/* r->prev->next = r->next */
	ecode = tdb_write_off(tdb, off, r->next);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	if (r->next != 0) {
		off = r->next + offsetof(struct tdb_free_record, magic_and_prev);
		/* r->next->prev = r->prev */

#ifdef CCAN_TDB2_DEBUG
		/* Note the parentheses: & binds less tightly than !=. */
		if ((tdb_read_off(tdb, off) & TDB_OFF_MASK) != r_off) {
			return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
					  "remove_from_list:"
					  " %llu bad list %llu",
					  (long long)r_off, (long long)b_off);
		}
#endif

		ecode = tdb_write_off(tdb, off, r->magic_and_prev);
		if (ecode != TDB_SUCCESS) {
			return ecode;
		}
	}
	return TDB_SUCCESS;
}
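
/*
 * The in-memory analogue of the unlink above, for orientation (a sketch;
 * the real code works on file offsets via tdb_read_off/tdb_write_off):
 */
#ifdef TDB2_FREE_EXAMPLES
struct example_node {
	struct example_node *prev, *next;
};

static void example_unlink(struct example_node **head, struct example_node *n)
{
	/* n->prev->next = n->next (or the head pointer if n is first). */
	if (n->prev)
		n->prev->next = n->next;
	else
		*head = n->next;

	/* n->next->prev = n->prev. */
	if (n->next)
		n->next->prev = n->prev;
}
#endif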

/* Enqueue in this free bucket. */
static enum TDB_ERROR enqueue_in_free(struct tdb_context *tdb,
				      tdb_off_t b_off,
				      tdb_off_t off,
				      tdb_len_t len)
{
	struct tdb_free_record new;
	enum TDB_ERROR ecode;
	uint64_t magic = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL));

	/* Fill in the free record: our free table and length... */
	new.ftable_and_len = ((uint64_t)tdb->ftable
			      << (64 - TDB_OFF_UPPER_STEAL))
		| len;
	/* ...and prev = 0 (we become the head), leaving just the magic. */
	new.magic_and_prev = magic;

	/* new->next = head. */
	new.next = tdb_read_off(tdb, b_off);
	if (TDB_OFF_IS_ERR(new.next)) {
		return new.next;
	}

	if (new.next) {
#ifdef CCAN_TDB2_DEBUG
		if (tdb_read_off(tdb,
				 new.next + offsetof(struct tdb_free_record,
						     magic_and_prev))
		    != magic) {
			return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
					  "enqueue_in_free: %llu bad head"
					  " prev %llu",
					  (long long)new.next,
					  (long long)b_off);
		}
#endif
		/* next->prev = new. */
		ecode = tdb_write_off(tdb, new.next
				      + offsetof(struct tdb_free_record,
						 magic_and_prev),
				      off | magic);
		if (ecode != TDB_SUCCESS) {
			return ecode;
		}
	}

	/* head = new. */
	ecode = tdb_write_off(tdb, b_off, off);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	return tdb_write_convert(tdb, off, &new, sizeof(new));
}
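
/*
 * magic_and_prev packs TDB_FREE_MAGIC into the top TDB_OFF_UPPER_STEAL
 * bits and the prev offset into the rest; frec_prev() strips the magic
 * with TDB_OFF_MASK. A sketch of the encoding (helper names are
 * hypothetical):
 */
#ifdef TDB2_FREE_EXAMPLES
static uint64_t example_pack_prev(uint64_t prev_off)
{
	return prev_off
		| ((uint64_t)TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL));
}

static uint64_t example_unpack_prev(uint64_t magic_and_prev)
{
	return magic_and_prev & TDB_OFF_MASK;
}
#endif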

/* List need not be locked. */
enum TDB_ERROR add_free_record(struct tdb_context *tdb,
			       tdb_off_t off, tdb_len_t len_with_header)
{
	tdb_off_t b_off;
	tdb_len_t len;
	enum TDB_ERROR ecode;

	assert(len_with_header >= sizeof(struct tdb_free_record));

	len = len_with_header - sizeof(struct tdb_used_record);

	b_off = bucket_off(tdb->ftable_off, size_to_bucket(len));
	ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	ecode = enqueue_in_free(tdb, b_off, off, len);
	tdb_unlock_free_bucket(tdb, b_off);
	return ecode;
}

static size_t adjust_size(size_t keylen, size_t datalen)
{
	size_t size = keylen + datalen;

	if (size < TDB_MIN_DATA_LEN)
		size = TDB_MIN_DATA_LEN;

	/* Round to next uint64_t boundary. */
	return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}
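
/*
 * The rounding idiom on its own: (x + 7) & ~7 rounds x up to the next
 * multiple of 8 (illustration only):
 */
#ifdef TDB2_FREE_EXAMPLES
static void example_round_up(void)
{
	assert(((13 + 7) & ~(size_t)7) == 16);	/* 13 rounds up to 16 */
	assert(((16 + 7) & ~(size_t)7) == 16);	/* multiples unchanged */
}
#endif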

/* If we have enough left over to be useful, split that off. */
static size_t record_leftover(size_t keylen, size_t datalen,
			      bool want_extra, size_t total_len)
{
	ssize_t leftover;

	/* Anticipate the same growth we allowed for in the search. */
	if (want_extra)
		datalen += datalen / 2;
	leftover = total_len - adjust_size(keylen, datalen);

	/* Too small to hold a free record?  Don't split it off. */
	if (leftover < (ssize_t)sizeof(struct tdb_free_record))
		return 0;

	return leftover;
}

static tdb_off_t ftable_offset(struct tdb_context *tdb, unsigned int ftable)
{
	tdb_off_t off;
	unsigned int i;

	if (likely(tdb->ftable == ftable))
		return tdb->ftable_off;

	off = first_ftable(tdb);
	for (i = 0; i < ftable; i++) {
		if (TDB_OFF_IS_ERR(off)) {
			break;
		}
		off = next_ftable(tdb, off);
	}
	return off;
}

/* Note: we unlock the current bucket if we coalesce or fail. */
static tdb_bool_err coalesce(struct tdb_context *tdb,
			     tdb_off_t off, tdb_off_t b_off,
			     tdb_len_t data_len)
{
	tdb_off_t end;
	struct tdb_free_record rec;
	enum TDB_ERROR ecode;

	add_stat(tdb, alloc_coalesce_tried, 1);
	end = off + sizeof(struct tdb_used_record) + data_len;

	while (end < tdb->map_size) {
		const struct tdb_free_record *r;
		tdb_off_t nb_off;
		unsigned ftable, bucket;

		r = tdb_access_read(tdb, end, sizeof(*r), true);
		if (TDB_PTR_IS_ERR(r)) {
			ecode = TDB_PTR_ERR(r);
			goto err;
		}

		if (frec_magic(r) != TDB_FREE_MAGIC
		    || frec_ftable(r) == TDB_FTABLE_NONE) {
			tdb_access_release(tdb, r);
			break;
		}

		ftable = frec_ftable(r);
		bucket = size_to_bucket(frec_len(r));
		nb_off = ftable_offset(tdb, ftable);
		if (TDB_OFF_IS_ERR(nb_off)) {
			tdb_access_release(tdb, r);
			ecode = nb_off;
			goto err;
		}
		nb_off = bucket_off(nb_off, bucket);
		tdb_access_release(tdb, r);

		/* We may be violating lock order here, so best effort. */
		if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT)
		    != TDB_SUCCESS) {
			add_stat(tdb, alloc_coalesce_lockfail, 1);
			break;
		}

		/* Now we have lock, re-check. */
		ecode = tdb_read_convert(tdb, end, &rec, sizeof(rec));
		if (ecode != TDB_SUCCESS) {
			tdb_unlock_free_bucket(tdb, nb_off);
			goto err;
		}

		if (unlikely(frec_magic(&rec) != TDB_FREE_MAGIC)) {
			add_stat(tdb, alloc_coalesce_race, 1);
			tdb_unlock_free_bucket(tdb, nb_off);
			break;
		}

		if (unlikely(frec_ftable(&rec) != ftable)
		    || unlikely(size_to_bucket(frec_len(&rec)) != bucket)) {
			add_stat(tdb, alloc_coalesce_race, 1);
			tdb_unlock_free_bucket(tdb, nb_off);
			break;
		}

		ecode = remove_from_list(tdb, nb_off, end, &rec);
		if (ecode != TDB_SUCCESS) {
			tdb_unlock_free_bucket(tdb, nb_off);
			goto err;
		}

		end += sizeof(struct tdb_used_record) + frec_len(&rec);
		tdb_unlock_free_bucket(tdb, nb_off);
		add_stat(tdb, alloc_coalesce_num_merged, 1);
	}

	/* Didn't find any adjacent free? */
	if (end == off + sizeof(struct tdb_used_record) + data_len)
		return false;

	/* OK, expand initial record */
	ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
	if (ecode != TDB_SUCCESS) {
		goto err;
	}

	if (frec_len(&rec) != data_len) {
		ecode = tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
				   "coalesce: expected data len %zu not %zu",
				   (size_t)data_len, (size_t)frec_len(&rec));
		goto err;
	}

	ecode = remove_from_list(tdb, b_off, off, &rec);
	if (ecode != TDB_SUCCESS) {
		goto err;
	}

	/* We have to drop this to avoid deadlocks, so make sure record
	 * doesn't get coalesced by someone else! */
	rec.ftable_and_len = (TDB_FTABLE_NONE << (64 - TDB_OFF_UPPER_STEAL))
		| (end - off - sizeof(struct tdb_used_record));
	ecode = tdb_write_off(tdb, off + offsetof(struct tdb_free_record,
						  ftable_and_len),
			      rec.ftable_and_len);
	if (ecode != TDB_SUCCESS) {
		goto err;
	}

	add_stat(tdb, alloc_coalesce_succeeded, 1);
	tdb_unlock_free_bucket(tdb, b_off);

	ecode = add_free_record(tdb, off, end - off);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}
	return true;

err:
	/* To unify error paths, we *always* unlock bucket on error. */
	tdb_unlock_free_bucket(tdb, b_off);
	return ecode;
}
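
/*
 * Pictorially, a successful coalesce turns adjacent free records into one
 * larger record (each record is a used-record header plus data):
 *
 *   before: [hdr|free A   ][hdr|free B  ][hdr|free C ][hdr|used...]
 *   after:  [hdr|free A+B+C                          ][hdr|used...]
 *
 * Each neighbour is unlinked from its own bucket under that bucket's
 * lock, then the first record is re-added with the combined length.
 */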

/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
				tdb_off_t ftable_off,
				tdb_off_t bucket,
				size_t keylen, size_t datalen,
				bool want_extra,
				unsigned magic,
				unsigned hashlow)
{
	tdb_off_t off, b_off, best_off;
	struct tdb_free_record best = { 0 };
	double multiplier;
	size_t size = adjust_size(keylen, datalen);
	enum TDB_ERROR ecode;

	add_stat(tdb, allocs, 1);
again:
	b_off = bucket_off(ftable_off, bucket);

	/* FIXME: Try non-blocking wait first, to measure contention. */
	/* Lock this bucket. */
	ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	best.ftable_and_len = -1ULL;
	best_off = 0;

	/* Get slack if we're after extra. */
	if (want_extra)
		multiplier = 1.5;
	else
		multiplier = 1.0;

	/* Walk the list to see if any are large enough, getting less fussy
	 * as we go. */
	off = tdb_read_off(tdb, b_off);
	if (TDB_OFF_IS_ERR(off)) {
		ecode = off;
		goto unlock_err;
	}

	while (off) {
		const struct tdb_free_record *r;
		tdb_len_t len;
		tdb_off_t next;
		tdb_bool_err coal;

		r = tdb_access_read(tdb, off, sizeof(*r), true);
		if (TDB_PTR_IS_ERR(r)) {
			ecode = TDB_PTR_ERR(r);
			goto unlock_err;
		}

		if (frec_magic(r) != TDB_FREE_MAGIC) {
			tdb_access_release(tdb, r);
			ecode = tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
					   "lock_and_alloc:"
					   " %llu non-free 0x%llx",
					   (long long)off,
					   (long long)r->magic_and_prev);
			goto unlock_err;
		}

		if (frec_len(r) >= size && frec_len(r) < frec_len(&best)) {
			best_off = off;
			best = *r;
		}

		if (frec_len(&best) < size * multiplier && best_off) {
			tdb_access_release(tdb, r);
			break;
		}

		multiplier *= 1.01;

		next = r->next;
		len = frec_len(r);
		tdb_access_release(tdb, r);

		/* Since we're going slow anyway, try coalescing here. */
		coal = coalesce(tdb, off, b_off, len);
		if (coal == 1) {
			/* This has unlocked list, restart. */
			goto again;
		}
		if (coal < 0) {
			/* This has already unlocked on error. */
			return coal;
		}
		off = next;
	}

	/* If we found anything at all, use it. */
	if (best_off) {
		struct tdb_used_record rec;
		size_t leftover;

		/* We're happy with this size: take it. */
		ecode = remove_from_list(tdb, b_off, best_off, &best);
		if (ecode != TDB_SUCCESS) {
			goto unlock_err;
		}

		leftover = record_leftover(keylen, datalen, want_extra,
					   frec_len(&best));

		assert(keylen + datalen + leftover <= frec_len(&best));
		/* We need to mark non-free before we drop lock, otherwise
		 * coalesce() could try to merge it! */
		ecode = set_header(tdb, &rec, magic, keylen, datalen,
				   frec_len(&best) - leftover, hashlow);
		if (ecode != TDB_SUCCESS) {
			goto unlock_err;
		}

		ecode = tdb_write_convert(tdb, best_off, &rec, sizeof(rec));
		if (ecode != TDB_SUCCESS) {
			goto unlock_err;
		}

		/* Bucket of leftover will be <= current bucket, so nested
		 * locking is allowed. */
		if (leftover) {
			add_stat(tdb, alloc_leftover, 1);
			ecode = add_free_record(tdb,
						best_off + sizeof(rec)
						+ frec_len(&best) - leftover,
						leftover);
			if (ecode != TDB_SUCCESS) {
				best_off = ecode;
			}
		}
		tdb_unlock_free_bucket(tdb, b_off);
		return best_off;
	}

	tdb_unlock_free_bucket(tdb, b_off);
	return 0;

unlock_err:
	tdb_unlock_free_bucket(tdb, b_off);
	return ecode;
}
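
/*
 * Note the search heuristic above: the walk accepts the current best as
 * soon as its length is within `multiplier` of the wanted size, and
 * multiplier grows by 1% per record examined, so long lists trade
 * best-fit quality for bounded search time.
 */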

/* Get a free block from current free list, or 0 if none, -ve on error. */
static tdb_off_t get_free(struct tdb_context *tdb,
			  size_t keylen, size_t datalen, bool want_extra,
			  unsigned magic, unsigned hashlow)
{
	tdb_off_t off, ftable_off;
	tdb_off_t start_b, b, ftable;
	bool wrapped = false;

	/* If they are growing, add 50% to get to higher bucket. */
	if (want_extra)
		start_b = size_to_bucket(adjust_size(keylen,
						     datalen + datalen / 2));
	else
		start_b = size_to_bucket(adjust_size(keylen, datalen));

	ftable_off = tdb->ftable_off;
	ftable = tdb->ftable;
	while (!wrapped || ftable_off != tdb->ftable_off) {
		/* Start at exact size bucket, and search up... */
		for (b = find_free_head(tdb, ftable_off, start_b);
		     b < TDB_FREE_BUCKETS;
		     b = find_free_head(tdb, ftable_off, b + 1)) {
			/* Try getting one from list. */
			off = lock_and_alloc(tdb, ftable_off,
					     b, keylen, datalen, want_extra,
					     magic, hashlow);
			if (TDB_OFF_IS_ERR(off))
				return off;
			if (off != 0) {
				if (b == start_b)
					add_stat(tdb, alloc_bucket_exact, 1);
				if (b == TDB_FREE_BUCKETS - 1)
					add_stat(tdb, alloc_bucket_max, 1);
				/* Worked?  Stay using this list. */
				tdb->ftable_off = ftable_off;
				tdb->ftable = ftable;
				return off;
			}
			/* Didn't work.  Try next bucket. */
		}

		if (TDB_OFF_IS_ERR(b)) {
			return b;
		}

		/* Hmm, try next table. */
		ftable_off = next_ftable(tdb, ftable_off);
		if (TDB_OFF_IS_ERR(ftable_off)) {
			return ftable_off;
		}
		ftable++;

		/* Ran out of tables?  Wrap back to the first one. */
		if (ftable_off == 0) {
			wrapped = true;
			ftable_off = first_ftable(tdb);
			if (TDB_OFF_IS_ERR(ftable_off)) {
				return ftable_off;
			}
			ftable = 0;
		}
	}

	return 0;
}

enum TDB_ERROR set_header(struct tdb_context *tdb,
			  struct tdb_used_record *rec,
			  unsigned magic, uint64_t keylen, uint64_t datalen,
			  uint64_t actuallen, unsigned hashlow)
{
	uint64_t keybits = (fls64(keylen) + 1) / 2;

	/* Use bottom bits of hash, so it's independent of hash table size. */
	rec->magic_and_meta = (hashlow & ((1 << 11)-1))
		| ((actuallen - (keylen + datalen)) << 11)
		| (keybits << 43)
		| ((uint64_t)magic << 48);
	rec->key_and_data_len = (keylen | (datalen << (keybits*2)));

	/* Encoding can fail on big values. */
	if (rec_key_length(rec) != keylen
	    || rec_data_length(rec) != datalen
	    || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
		return tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
				  "Could not encode k=%llu,d=%llu,a=%llu",
				  (long long)keylen, (long long)datalen,
				  (long long)actuallen);
	}
	return TDB_SUCCESS;
}
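
/*
 * magic_and_meta layout, as encoded above (64 bits total):
 *
 *   bits  0-10: bottom 11 bits of the hash
 *   bits 11-42: extra padding (actuallen - keylen - datalen)
 *   bits 43-47: keybits (half the bit-length of the key)
 *   bits 48-63: magic
 *
 * key_and_data_len packs keylen into the low 2*keybits bits and datalen
 * above it, which is why oversized values can fail to encode.
 */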

/* Expand the database. */
static enum TDB_ERROR tdb_expand(struct tdb_context *tdb, tdb_len_t size)
{
	uint64_t old_size;
	tdb_len_t wanted;
	enum TDB_ERROR ecode;

	/* We need room for the record header too. */
	wanted = sizeof(struct tdb_used_record) + size;

	/* Need to hold a hash lock to expand DB: transactions rely on it. */
	if (!(tdb->flags & TDB_NOLOCK)
	    && !tdb->allrecord_lock.count && !tdb_has_hash_locks(tdb)) {
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				  "tdb_expand: must hold lock during expand");
	}

	/* Always make room for at least 100 more records, and at
	 * least 25% more space. */
	if (size * TDB_EXTENSION_FACTOR > tdb->map_size / 4)
		wanted = size * TDB_EXTENSION_FACTOR;
	else
		wanted = tdb->map_size / 4;
	wanted = adjust_size(0, wanted);

	/* Only one person can expand file at a time. */
	ecode = tdb_lock_expand(tdb, F_WRLCK);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	/* Someone else may have expanded the file, so retry. */
	old_size = tdb->map_size;
	tdb->methods->oob(tdb, tdb->map_size + 1, true);
	if (tdb->map_size != old_size) {
		tdb_unlock_expand(tdb, F_WRLCK);
		return TDB_SUCCESS;
	}

	ecode = tdb->methods->expand_file(tdb, wanted);
	if (ecode != TDB_SUCCESS) {
		tdb_unlock_expand(tdb, F_WRLCK);
		return ecode;
	}

	/* We need to drop this lock before adding free record. */
	tdb_unlock_expand(tdb, F_WRLCK);

	add_stat(tdb, expands, 1);
	return add_free_record(tdb, old_size, wanted);
}
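
/*
 * Growth policy in one line: grow by max(size * TDB_EXTENSION_FACTOR,
 * map_size / 4), rounded up by adjust_size(), so expansion cost is
 * amortized rather than paid once per record.
 */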

/* This won't fail: it will expand the database if it has to. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
		uint64_t hash, unsigned magic, bool growing)
{
	tdb_off_t off;

	/* We can't hold pointers during this: we could unmap! */
	assert(!tdb->direct_access);

	for (;;) {
		enum TDB_ERROR ecode;
		off = get_free(tdb, keylen, datalen, growing, magic, hash);
		if (likely(off != 0))
			break;

		ecode = tdb_expand(tdb, adjust_size(keylen, datalen));
		if (ecode != TDB_SUCCESS) {
			return ecode;
		}
	}

	return off;
}
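
/*
 * Typical call chain, for orientation (a sketch of how the pieces fit
 * together, not code from this file): a writer computes the record's
 * hash, then calls alloc(), which loops get_free() -> lock_and_alloc()
 * and falls back to tdb_expand() until a block is found.
 */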