/*
  Trivial Database 2: free list/block handling
  Copyright (C) Rusty Russell 2010

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 3 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <ccan/likely/likely.h>
#include <ccan/ilog/ilog.h>
#include <assert.h>
static unsigned fls64(uint64_t val)
{
	/* "Find last set": 1-based index of the highest set bit. */
	return ilog64(val);
}
/* In which bucket would we find a particular record size? (ignoring header) */
unsigned int size_to_bucket(tdb_len_t data_len)
{
	unsigned int bucket;

	/* We can't have records smaller than this. */
	assert(data_len >= TDB_MIN_DATA_LEN);

	/* Ignoring the header... */
	if (data_len - TDB_MIN_DATA_LEN <= 64) {
		/* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
		bucket = (data_len - TDB_MIN_DATA_LEN) / 8;
	} else {
		/* After that we go power of 2. */
		bucket = fls64(data_len - TDB_MIN_DATA_LEN) + 2;
	}

	if (unlikely(bucket >= TDB_FREE_BUCKETS))
		bucket = TDB_FREE_BUCKETS - 1;
	return bucket;
}
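/* A worked example of the mapping (illustrative only; the concrete numbers
 * assume TDB_MIN_DATA_LEN == 8, which is not stated in this file):
 *
 *	size_to_bucket(8)   == 0	linear region: one bucket per 8 bytes
 *	size_to_bucket(16)  == 1
 *	size_to_bucket(72)  == 8	end of the linear region (8 + 64)
 *	size_to_bucket(73)  == fls64(65) + 2  == 9	power-of-2 region
 *	size_to_bucket(136) == fls64(128) + 2 == 10
 *
 * So small records get fine-grained buckets, large ones coarse ones. */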
tdb_off_t first_flist(struct tdb_context *tdb)
{
	return tdb_read_off(tdb, offsetof(struct tdb_header, free_list));
}

tdb_off_t next_flist(struct tdb_context *tdb, tdb_off_t flist)
{
	return tdb_read_off(tdb, flist + offsetof(struct tdb_freelist, next));
}
int tdb_flist_init(struct tdb_context *tdb)
{
	/* Use reservoir sampling algorithm to select a free list at random. */
	unsigned int rnd, max = 0, count = 0;
	tdb_off_t off;

	tdb->flist_off = off = first_flist(tdb);
	tdb->flist = 0;

	while (off) {
		if (off == TDB_OFF_ERR)
			return -1;

		rnd = random();
		if (rnd >= max) {
			tdb->flist_off = off;
			tdb->flist = count;
			max = rnd;
		}

		off = next_flist(tdb, off);
		count++;
	}
	return 0;
}
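/* Reservoir-sampling sketch (illustrative): keeping the item with the
 * largest random() draw while walking the chain picks each of the N lists
 * with equal probability, without knowing N up front.  The in-memory
 * equivalent of the loop above:
 *
 *	unsigned int rnd, max = 0, pick = 0, i;
 *	for (i = 0; i < nitems; i++) {
 *		rnd = random();
 *		if (rnd >= max) {
 *			pick = i;
 *			max = rnd;
 *		}
 *	}
 *
 * Spreading contexts across free lists this way reduces lock contention. */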
/* Offset of a given bucket. */
tdb_off_t bucket_off(tdb_off_t flist_off, unsigned bucket)
{
	return flist_off + offsetof(struct tdb_freelist, buckets)
		+ bucket * sizeof(tdb_off_t);
}
/* Returns TDB_FREE_BUCKETS if no non-empty bucket is found from "bucket"
 * onwards, otherwise the number of the first bucket to search. */
static tdb_off_t find_free_head(struct tdb_context *tdb,
				tdb_off_t flist_off,
				tdb_off_t bucket)
{
	/* Speculatively search for a non-zero bucket. */
	return tdb_find_nonzero_off(tdb, bucket_off(flist_off, 0),
				    bucket, TDB_FREE_BUCKETS);
}
/* Remove from free bucket. */
static int remove_from_list(struct tdb_context *tdb,
			    tdb_off_t b_off, tdb_off_t r_off,
			    struct tdb_free_record *r)
{
	tdb_off_t off;

	/* Front of list?  Then our predecessor is the bucket head itself. */
	if (frec_prev(r) == 0) {
		off = b_off;
	} else {
		off = frec_prev(r) + offsetof(struct tdb_free_record, next);
	}

	if (tdb_read_off(tdb, off) != r_off) {
		tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
			 "remove_from_list: %llu bad prev in list %llu\n",
			 (long long)r_off, (long long)b_off);
		return -1;
	}

	/* r->prev->next = r->next */
	if (tdb_write_off(tdb, off, r->next)) {
		return -1;
	}

	if (r->next != 0) {
		off = r->next + offsetof(struct tdb_free_record,
					 magic_and_prev);
		/* r->next->prev = r->prev */

		/* Mask the magic bits off before comparing offsets. */
		if ((tdb_read_off(tdb, off) & TDB_OFF_MASK) != r_off) {
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "remove_from_list: %llu bad list %llu\n",
				 (long long)r_off, (long long)b_off);
			return -1;
		}

		if (tdb_write_off(tdb, off, r->magic_and_prev)) {
			return -1;
		}
	}
	return 0;
}
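/* In-memory equivalent of the unlink above (a sketch): the on-disk chain is
 * an ordinary doubly-linked list, except that prev == 0 marks the head,
 * whose forward pointer is the bucket slot itself:
 *
 *	if (r->prev == NULL)
 *		bucket_head = r->next;
 *	else
 *		r->prev->next = r->next;
 *	if (r->next)
 *		r->next->prev = r->prev;
 */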
/* Enqueue in this free bucket. */
static int enqueue_in_free(struct tdb_context *tdb,
			   tdb_off_t b_off,
			   tdb_off_t off,
			   tdb_len_t len)
{
	struct tdb_free_record new;
	uint64_t magic = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL));

	/* flist_and_len packs our free list number above the length. */
	new.flist_and_len = ((uint64_t)tdb->flist << (64 - TDB_OFF_UPPER_STEAL))
		| len;
	/* prev == 0 marks us as the new head. */
	new.magic_and_prev = magic;

	/* new->next = head. */
	new.next = tdb_read_off(tdb, b_off);
	if (new.next == TDB_OFF_ERR)
		return -1;

	if (new.next) {
		/* The old head must itself have prev == 0. */
		if (tdb_read_off(tdb,
				 new.next + offsetof(struct tdb_free_record,
						     magic_and_prev))
		    != magic) {
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "enqueue_in_free: %llu bad head prev %llu\n",
				 (long long)new.next, (long long)b_off);
			return -1;
		}

		/* next->prev = new. */
		if (tdb_write_off(tdb, new.next
				  + offsetof(struct tdb_free_record,
					     magic_and_prev),
				  off | magic) != 0)
			return -1;
	}

	/* head = new. */
	if (tdb_write_off(tdb, b_off, off) != 0)
		return -1;

	return tdb_write_convert(tdb, off, &new, sizeof(new));
}
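/* In-memory equivalent of the enqueue above (a sketch): a plain push onto
 * the front of a doubly-linked list, leaving the new node's prev as 0 to
 * mark it as the head:
 *
 *	new->prev = NULL;
 *	new->next = bucket_head;
 *	if (new->next)
 *		new->next->prev = new;
 *	bucket_head = new;
 */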
/* List need not be locked. */
int add_free_record(struct tdb_context *tdb,
		    tdb_off_t off, tdb_len_t len_with_header)
{
	tdb_off_t b_off;
	tdb_len_t len;
	int ret;

	assert(len_with_header >= sizeof(struct tdb_free_record));

	len = len_with_header - sizeof(struct tdb_used_record);

	b_off = bucket_off(tdb->flist_off, size_to_bucket(len));
	if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) != 0)
		return -1;

	ret = enqueue_in_free(tdb, b_off, off, len);
	tdb_unlock_free_bucket(tdb, b_off);
	return ret;
}
static size_t adjust_size(size_t keylen, size_t datalen)
{
	size_t size = keylen + datalen;

	if (size < TDB_MIN_DATA_LEN)
		size = TDB_MIN_DATA_LEN;

	/* Round to next uint64_t boundary. */
	return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}
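/* Worked example: since sizeof(uint64_t) == 8, the last line is
 * (size + 7) & ~7, i.e. round up to a multiple of 8.  Assuming
 * TDB_MIN_DATA_LEN == 8 (not stated in this file):
 *
 *	adjust_size(3, 2)  ->  5 -> clamped to 8, already aligned
 *	adjust_size(4, 5)  ->  9 -> (9 + 7) & ~7 == 16
 *	adjust_size(8, 8)  -> 16 -> already aligned, stays 16
 */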
/* If we have enough left over to be useful, split that off. */
static size_t record_leftover(size_t keylen, size_t datalen,
			      bool want_extra, size_t total_len)
{
	ssize_t leftover;

	if (want_extra)
		datalen += datalen / 2;
	leftover = total_len - adjust_size(keylen, datalen);

	if (leftover < (ssize_t)sizeof(struct tdb_free_record))
		return 0;

	return leftover;
}
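/* Worked example (illustrative): total_len == 256, keylen == 16,
 * datalen == 32, want_extra == true:
 *
 *	datalen grows to 48 (32 + 32/2),
 *	leftover = 256 - adjust_size(16, 48) = 256 - 64 = 192,
 *
 * which comfortably exceeds sizeof(struct tdb_free_record), so 192 bytes
 * get split off as a new free record.  A tail too small to hold a free
 * record header would be unusable, so it stays as padding instead. */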
/* FIXME: Shortcut common case where tdb->flist == flist */
static tdb_off_t flist_offset(struct tdb_context *tdb, unsigned int flist)
{
	tdb_off_t off = first_flist(tdb);
	unsigned int i;

	for (i = 0; i < flist; i++)
		off = next_flist(tdb, off);
	return off;
}
/* Note: we unlock the current bucket if we coalesce or fail. */
static int coalesce(struct tdb_context *tdb,
		    tdb_off_t off, tdb_off_t b_off, tdb_len_t data_len)
{
	struct tdb_free_record pad, *r;
	tdb_off_t end;

	end = off + sizeof(struct tdb_used_record) + data_len;

	while (end < tdb->map_size) {
		tdb_off_t nb_off;
		unsigned flist, bucket;

		/* FIXME: do tdb_get here and below really win? */
		r = tdb_get(tdb, end, &pad, sizeof(pad));
		if (!r)
			goto err;

		if (frec_magic(r) != TDB_FREE_MAGIC)
			break;

		flist = frec_flist(r);
		bucket = size_to_bucket(frec_len(r));
		nb_off = bucket_off(flist_offset(tdb, flist), bucket);

		/* We may be violating lock order here, so best effort. */
		if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT) == -1)
			break;

		/* Now we have lock, re-check. */
		r = tdb_get(tdb, end, &pad, sizeof(pad));
		if (!r) {
			tdb_unlock_free_bucket(tdb, nb_off);
			goto err;
		}

		if (unlikely(frec_magic(r) != TDB_FREE_MAGIC)) {
			tdb_unlock_free_bucket(tdb, nb_off);
			break;
		}

		if (unlikely(frec_flist(r) != flist)
		    || unlikely(size_to_bucket(frec_len(r)) != bucket)) {
			tdb_unlock_free_bucket(tdb, nb_off);
			break;
		}

		if (remove_from_list(tdb, nb_off, end, r) == -1) {
			tdb_unlock_free_bucket(tdb, nb_off);
			goto err;
		}

		end += sizeof(struct tdb_used_record) + frec_len(r);
		tdb_unlock_free_bucket(tdb, nb_off);
	}

	/* Didn't find any adjacent free? */
	if (end == off + sizeof(struct tdb_used_record) + data_len)
		return 0;

	/* OK, expand record */
	r = tdb_get(tdb, off, &pad, sizeof(pad));
	if (!r)
		goto err;

	if (frec_len(r) != data_len) {
		tdb->ecode = TDB_ERR_CORRUPT;
		tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
			 "coalesce: expected data len %llu not %llu\n",
			 (long long)data_len, (long long)frec_len(r));
		goto err;
	}

	if (remove_from_list(tdb, b_off, off, r) == -1)
		goto err;

	r = tdb_access_write(tdb, off, sizeof(*r), true);
	if (!r)
		goto err;

	/* We have to drop this to avoid deadlocks, so make sure record
	 * doesn't get coalesced by someone else! */
	r->magic_and_prev = TDB_COALESCING_MAGIC << (64 - TDB_OFF_UPPER_STEAL);
	/* FIXME: Use 255 as invalid free list? */
	r->flist_and_len = end - off - sizeof(struct tdb_used_record);
	if (tdb_access_commit(tdb, r) != 0)
		goto err;

	tdb_unlock_free_bucket(tdb, b_off);

	if (add_free_record(tdb, off, end - off) == -1)
		return -1;
	return 1;

err:
	/* To unify error paths, we *always* unlock bucket on error. */
	tdb_unlock_free_bucket(tdb, b_off);
	return -1;
}
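/* Locking summary for the function above: lock order is normally by
 * ascending offset, but a neighbour's bucket lock can live anywhere in the
 * file, so coalesce() only *tries* the neighbour (TDB_LOCK_NOWAIT) and
 * gives up rather than risk deadlock.  Schematically:
 *
 *	lock(B_mine);                  // caller already holds this
 *	if (!trylock(B_neighbour))     // may violate order: never block
 *		stop coalescing;
 *	unlink neighbour; extend our record; unlock(B_neighbour);
 *	unlock(B_mine); re-add the merged record;
 */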
/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
				tdb_off_t flist_off,
				tdb_off_t bucket,
				size_t keylen, size_t datalen,
				bool want_extra,
				unsigned hashlow)
{
	tdb_off_t off, b_off, best_off;
	struct tdb_free_record pad, best = { 0 }, *r;
	double multiplier;
	size_t size = adjust_size(keylen, datalen);

again:
	b_off = bucket_off(flist_off, bucket);

	/* FIXME: Try non-blocking wait first, to measure contention. */
	/* Lock this bucket. */
	if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == -1) {
		return TDB_OFF_ERR;
	}

	best.flist_and_len = -1ULL;
	best_off = 0;

	/* Get slack if we're after extra. */
	if (want_extra)
		multiplier = 1.5;
	else
		multiplier = 1.0;

	/* Walk the list to see if any are large enough, getting less fussy
	 * as we go. */
	off = tdb_read_off(tdb, b_off);
	if (unlikely(off == TDB_OFF_ERR))
		goto unlock_err;

	while (off) {
		/* FIXME: Does tdb_get win anything here? */
		r = tdb_get(tdb, off, &pad, sizeof(*r));
		if (!r)
			goto unlock_err;

		if (frec_magic(r) != TDB_FREE_MAGIC) {
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "lock_and_alloc: %llu non-free 0x%llx\n",
				 (long long)off,
				 (long long)r->magic_and_prev);
			goto unlock_err;
		}

		if (frec_len(r) >= size && frec_len(r) < frec_len(&best)) {
			best_off = off;
			best = *r;
		}

		if (frec_len(&best) < size * multiplier && best_off)
			break;

		multiplier *= 1.01;

		/* Since we're going slow anyway, try coalescing here. */
		switch (coalesce(tdb, off, b_off, frec_len(r))) {
		case -1:
			/* This has already unlocked on error. */
			return TDB_OFF_ERR;
		case 1:
			/* This has unlocked list, restart. */
			goto again;
		}
		off = r->next;
	}

	/* If we found anything at all, use it. */
	if (best_off) {
		struct tdb_used_record rec;
		size_t leftover;

		/* We're happy with this size: take it. */
		if (remove_from_list(tdb, b_off, best_off, &best) != 0)
			goto unlock_err;

		leftover = record_leftover(keylen, datalen, want_extra,
					   frec_len(&best));

		assert(keylen + datalen + leftover <= frec_len(&best));
		/* We need to mark non-free before we drop lock, otherwise
		 * coalesce() could try to merge it! */
		if (set_used_header(tdb, &rec, keylen, datalen,
				    frec_len(&best) - leftover,
				    hashlow) != 0)
			goto unlock_err;

		if (tdb_write_convert(tdb, best_off, &rec, sizeof(rec)) != 0)
			goto unlock_err;

		/* Bucket of leftover will be <= current bucket, so nested
		 * locking is allowed. */
		if (leftover) {
			if (add_free_record(tdb,
					    best_off + sizeof(rec)
					    + frec_len(&best) - leftover,
					    leftover))
				best_off = TDB_OFF_ERR;
		}
		tdb_unlock_free_bucket(tdb, b_off);
		return best_off;
	}

	tdb_unlock_free_bucket(tdb, b_off);
	return 0;

unlock_err:
	tdb_unlock_free_bucket(tdb, b_off);
	return TDB_OFF_ERR;
}
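/* Worked example of the "less fussy" walk (illustrative): with size == 128
 * and want_extra set, the multiplier starts at 1.5, so we only stop early
 * for a best fit under 192 bytes.  Each step multiplies by 1.01, so after
 * roughly 29 records (1.5 * 1.01^29 > 2) even a 256-byte block is accepted
 * rather than walking the rest of the bucket: the longer the search, the
 * more slack we tolerate. */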
/* Get a free block from current free list, or 0 if none. */
static tdb_off_t get_free(struct tdb_context *tdb,
			  size_t keylen, size_t datalen, bool want_extra,
			  unsigned hashlow)
{
	tdb_off_t off, flist_off;
	unsigned start_b, b, flist;
	bool wrapped = false;

	/* If they are growing, add 50% to get to higher bucket. */
	if (want_extra)
		start_b = size_to_bucket(adjust_size(keylen,
						     datalen + datalen / 2));
	else
		start_b = size_to_bucket(adjust_size(keylen, datalen));

	flist_off = tdb->flist_off;
	flist = tdb->flist;
	while (!wrapped || flist_off != tdb->flist_off) {
		/* Start at exact size bucket, and search up... */
		for (b = find_free_head(tdb, flist_off, start_b);
		     b < TDB_FREE_BUCKETS;
		     b = find_free_head(tdb, flist_off, b + 1)) {
			/* Try getting one from list. */
			off = lock_and_alloc(tdb, flist_off,
					     b, keylen, datalen, want_extra,
					     hashlow);
			if (off == TDB_OFF_ERR)
				return TDB_OFF_ERR;
			if (off != 0) {
				/* Worked?  Stay using this list. */
				tdb->flist_off = flist_off;
				tdb->flist = flist;
				return off;
			}
			/* Didn't work.  Try next bucket. */
		}

		/* Hmm, try next list. */
		flist_off = next_flist(tdb, flist_off);
		flist++;

		if (flist_off == 0) {
			wrapped = true;
			flist_off = first_flist(tdb);
			flist = 0;
		}
	}

	return 0;
}
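/* Search-order sketch for the function above (illustrative pseudocode):
 *
 *	for each flist, starting at tdb->flist_off, wrapping once:
 *		for b in find_free_head(start_b) .. TDB_FREE_BUCKETS - 1:
 *			if lock_and_alloc(flist, b, ...) finds a block:
 *				return it (and stick with this flist)
 *	return 0;	// nothing free anywhere: caller must expand
 */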
int set_used_header(struct tdb_context *tdb,
		    struct tdb_used_record *rec,
		    uint64_t keylen, uint64_t datalen,
		    uint64_t actuallen, unsigned hashlow)
{
	uint64_t keybits = (fls64(keylen) + 1) / 2;

	/* Use bottom bits of hash, so it's independent of hash table size. */
	rec->magic_and_meta = (hashlow & ((1 << 11)-1))
		| ((actuallen - (keylen + datalen)) << 11)
		| (keybits << 43)
		| (TDB_MAGIC << 48);
	rec->key_and_data_len = (keylen | (datalen << (keybits*2)));

	/* Encoding can fail on big values. */
	if (rec_key_length(rec) != keylen
	    || rec_data_length(rec) != datalen
	    || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
		tdb->ecode = TDB_ERR_IO;
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "Could not encode k=%llu,d=%llu,a=%llu\n",
			 (long long)keylen, (long long)datalen,
			 (long long)actuallen);
		return -1;
	}
	return 0;
}
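/* Worked encoding example (illustrative): keylen == 5, datalen == 100:
 *
 *	keybits = (fls64(5) + 1) / 2 = (3 + 1) / 2 = 2,
 *
 * so the key length occupies keybits*2 == 4 bits of key_and_data_len:
 *
 *	key_and_data_len = 5 | (100 << 4) == 1605,
 *
 * and decoding recovers 1605 & 15 == 5 and 1605 >> 4 == 100.  Storing half
 * the bit count is what lets keybits fit in a handful of bits of
 * magic_and_meta; the check above catches values too big to encode. */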
/* Expand the database. */
static int tdb_expand(struct tdb_context *tdb, tdb_len_t size)
{
	uint64_t old_size;
	tdb_len_t wanted;

	/* We need room for the record header too. */
	wanted = sizeof(struct tdb_used_record) + size;

	/* Need to hold a hash lock to expand DB: transactions rely on it. */
	if (!(tdb->flags & TDB_NOLOCK)
	    && !tdb->allrecord_lock.count && !tdb_has_hash_locks(tdb)) {
		tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
			 "tdb_expand: must hold lock during expand\n");
		return -1;
	}

	/* Always make room for at least 100 more records, and at
	 * least 25% more space. */
	if (size * TDB_EXTENSION_FACTOR > tdb->map_size / 4)
		wanted = size * TDB_EXTENSION_FACTOR;
	else
		wanted = tdb->map_size / 4;
	wanted = adjust_size(0, wanted);

	/* Only one person can expand file at a time. */
	if (tdb_lock_expand(tdb, F_WRLCK) != 0)
		return -1;

	/* Someone else may have expanded the file, so retry. */
	old_size = tdb->map_size;
	tdb->methods->oob(tdb, tdb->map_size + 1, true);
	if (tdb->map_size != old_size) {
		tdb_unlock_expand(tdb, F_WRLCK);
		return 0;
	}

	if (tdb->methods->expand_file(tdb, wanted) == -1) {
		tdb_unlock_expand(tdb, F_WRLCK);
		return -1;
	}

	/* We need to drop this lock before adding free record. */
	tdb_unlock_expand(tdb, F_WRLCK);

	return add_free_record(tdb, old_size, wanted);
}
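/* Sizing example (illustrative; taking the comment's "100 more records" to
 * mean TDB_EXTENSION_FACTOR == 100): on a 1 MB file, a 100-byte request
 * gives 100 * 100 == 10,000 bytes, which is less than map_size / 4
 * (256 kB), so the 25% rule wins and the file grows by at least 256 kB.
 * Either way the new space becomes one big free record at the old end of
 * the file. */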
/* This won't fail: it will expand the database if it has to. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
		uint64_t hash, bool growing)
{
	tdb_off_t off;

	/* We can't hold pointers during this: we could unmap! */
	assert(!tdb->direct_access);

	for (;;) {
		off = get_free(tdb, keylen, datalen, growing, hash);
		if (likely(off != 0))
			break;

		if (tdb_expand(tdb, adjust_size(keylen, datalen)))
			return TDB_OFF_ERR;
	}

	return off;
}