/*
   Trivial Database 2: free list/block handling
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "private.h"
#include <ccan/likely/likely.h>
#include <ccan/ilog/ilog.h>
#include <assert.h>

/* ilog64() gives the 1-based index of the highest set bit, i.e. fls. */
static unsigned fls64(uint64_t val)
{
	return ilog64(val);
}

/* In which bucket would we find a particular record size? (ignoring header) */
unsigned int size_to_bucket(tdb_len_t data_len)
{
	unsigned int bucket;

	/* We can't have records smaller than this. */
	assert(data_len >= TDB_MIN_DATA_LEN);

	/* Ignoring the header... */
	if (data_len - TDB_MIN_DATA_LEN <= 64) {
		/* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
		bucket = (data_len - TDB_MIN_DATA_LEN) / 8;
	} else {
		/* After that we go power of 2. */
		bucket = fls64(data_len - TDB_MIN_DATA_LEN) + 2;
	}

	if (unlikely(bucket >= TDB_FREE_BUCKETS))
		bucket = TDB_FREE_BUCKETS - 1;
	return bucket;
}

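/*
 * Worked example (illustrative only, assuming TDB_MIN_DATA_LEN is 8):
 *
 *	size_to_bucket(8)   == (8 - 8) / 8    == 0
 *	size_to_bucket(40)  == (40 - 8) / 8   == 4
 *	size_to_bucket(72)  == (72 - 8) / 8   == 8
 *	size_to_bucket(200) == fls64(192) + 2 == 10
 *
 * So buckets 0..8 step linearly by 8 bytes, after which each bucket covers
 * roughly a doubling in size.
 */
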
tdb_off_t first_ftable(struct tdb_context *tdb)
{
	return tdb_read_off(tdb, offsetof(struct tdb_header, free_table));
}

tdb_off_t next_ftable(struct tdb_context *tdb, tdb_off_t ftable)
{
	return tdb_read_off(tdb, ftable + offsetof(struct tdb_freetable, next));
}

enum TDB_ERROR tdb_ftable_init(struct tdb_context *tdb)
{
	/* Use reservoir sampling algorithm to select a free list at random. */
	unsigned int rnd, max = 0, count = 0;
	tdb_off_t off;

	tdb->ftable_off = off = first_ftable(tdb);
	tdb->ftable = 0;

	while (off) {
		if (TDB_OFF_IS_ERR(off)) {
			return off;
		}

		rnd = random();
		if (rnd >= max) {
			tdb->ftable_off = off;
			tdb->ftable = count;
			max = rnd;
		}

		off = next_ftable(tdb, off);
		count++;
	}
	return TDB_SUCCESS;
}

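/*
 * The loop above is a k=1 reservoir sample: each free table can replace the
 * current choice, so one is picked without knowing the count in advance.
 * A minimal standalone sketch of the same idea:
 *
 *	unsigned int rnd, max = 0, i, chosen = 0;
 *	for (i = 0; i < n_items; i++) {
 *		rnd = random();
 *		if (rnd >= max) {
 *			chosen = i;
 *			max = rnd;
 *		}
 *	}
 *
 * (Keeping the item with the largest random draw only approximates a
 * uniform choice, but it is cheap and good enough to spread contention
 * across free lists.)
 */
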
/* Offset of a given bucket. */
tdb_off_t bucket_off(tdb_off_t ftable_off, unsigned bucket)
{
	return ftable_off + offsetof(struct tdb_freetable, buckets)
		+ bucket * sizeof(tdb_off_t);
}

/* Returns free_buckets + 1, or list number to search, or -ve error. */
static tdb_off_t find_free_head(struct tdb_context *tdb,
				tdb_off_t ftable_off,
				tdb_off_t bucket)
{
	/* Speculatively search for a non-zero bucket. */
	return tdb_find_nonzero_off(tdb, bucket_off(ftable_off, 0),
				    bucket, TDB_FREE_BUCKETS);
}

static void check_list(struct tdb_context *tdb, tdb_off_t b_off)
{
#ifdef CCAN_TDB2_DEBUG
	tdb_off_t off, prev = 0, first;
	struct tdb_free_record r;

	first = off = (tdb_read_off(tdb, b_off) & TDB_OFF_MASK);
	while (off != 0) {
		tdb_read_convert(tdb, off, &r, sizeof(r));
		if (frec_magic(&r) != TDB_FREE_MAGIC)
			abort();
		if (prev && frec_prev(&r) != prev)
			abort();
		prev = off;
		off = r.next;
	}

	if (first) {
		/* Head's prev should be the tail we ended on. */
		tdb_read_convert(tdb, first, &r, sizeof(r));
		if (frec_prev(&r) != prev)
			abort();
	}
#endif
}

/* Remove from free bucket. */
static enum TDB_ERROR remove_from_list(struct tdb_context *tdb,
				       tdb_off_t b_off, tdb_off_t r_off,
				       const struct tdb_free_record *r)
{
	tdb_off_t off, prev_next, head;
	enum TDB_ERROR ecode;

	/* Is this the only element in the list?  Zero out bucket, and we're done. */
	if (frec_prev(r) == r_off)
		return tdb_write_off(tdb, b_off, 0);

	/* off = &r->prev->next */
	off = frec_prev(r) + offsetof(struct tdb_free_record, next);

	/* Get prev->next */
	prev_next = tdb_read_off(tdb, off);
	if (TDB_OFF_IS_ERR(prev_next))
		return prev_next;

	/* If prev->next == 0, we were head: update bucket to point to next. */
	if (prev_next == 0) {
		/* We must preserve upper bits. */
		head = tdb_read_off(tdb, b_off);
		if (TDB_OFF_IS_ERR(head))
			return head;

		if ((head & TDB_OFF_MASK) != r_off) {
			return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
					  "remove_from_list:"
					  " %llu head %llu on list %llu",
					  (long long)r_off,
					  (long long)head,
					  (long long)b_off);
		}
		head = ((head & ~TDB_OFF_MASK) | r->next);
		ecode = tdb_write_off(tdb, b_off, head);
		if (ecode != TDB_SUCCESS)
			return ecode;
	} else {
		/* r->prev->next = r->next */
		ecode = tdb_write_off(tdb, off, r->next);
		if (ecode != TDB_SUCCESS)
			return ecode;
	}

	if (r->next == 0) {
		/* If we were the tail, off = &head->prev. */
		head = tdb_read_off(tdb, b_off);
		if (TDB_OFF_IS_ERR(head))
			return head;
		head &= TDB_OFF_MASK;
		off = head + offsetof(struct tdb_free_record, magic_and_prev);
	} else {
		/* off = &r->next->prev */
		off = r->next + offsetof(struct tdb_free_record,
					 magic_and_prev);
	}

#ifdef CCAN_TDB2_DEBUG
	/* *off should point back at us. */
	if ((tdb_read_off(tdb, off) & TDB_OFF_MASK) != r_off) {
		return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
				  "remove_from_list:"
				  " %llu bad prev in list %llu",
				  (long long)r_off, (long long)b_off);
	}
#endif
	/* r->next->prev = r->prev */
	return tdb_write_off(tdb, off, r->magic_and_prev);
}

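/*
 * Layout sketch (illustrative): a bucket head holds the offset of the first
 * free record in its low TDB_OFF_MASK bits; free records form a circular
 * doubly-linked list where prev lives in the low bits of magic_and_prev
 * (the top bits hold TDB_FREE_MAGIC).  The head's prev is the tail, and the
 * tail's next is 0:
 *
 *	bucket -> A             B             C
 *	          prev = C      prev = A      prev = B
 *	          next = B      next = C      next = 0
 *
 * This is why a single-element list satisfies frec_prev(r) == r_off above,
 * and why being head is detected by prev->next == 0 (our prev is the tail).
 */
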
/* Enqueue in this free bucket: sets coalesce if we've added 128
 * entries to it. */
static enum TDB_ERROR enqueue_in_free(struct tdb_context *tdb,
				      tdb_off_t b_off,
				      tdb_off_t off,
				      tdb_len_t len,
				      bool *coalesce)
{
	struct tdb_free_record new;
	enum TDB_ERROR ecode;
	tdb_off_t prev, head;
	uint64_t magic = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL));

	head = tdb_read_off(tdb, b_off);
	if (TDB_OFF_IS_ERR(head))
		return head;

	/* Set the free table and length; prev and next are filled in below. */
	new.ftable_and_len = ((uint64_t)tdb->ftable << (64 - TDB_OFF_UPPER_STEAL))
		| len;

	/* new->next = head. */
	new.next = (head & TDB_OFF_MASK);

	/* First element?  Prev points to ourselves. */
	if (!new.next) {
		new.magic_and_prev = (magic | off);
	} else {
		/* new->prev = next->prev */
		prev = tdb_read_off(tdb,
				    new.next + offsetof(struct tdb_free_record,
							magic_and_prev));
		new.magic_and_prev = prev;
		if (frec_magic(&new) != TDB_FREE_MAGIC) {
			return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
					  "enqueue_in_free: %llu bad head"
					  " prev %llu",
					  (long long)new.next,
					  (long long)prev);
		}
		/* next->prev = new. */
		ecode = tdb_write_off(tdb, new.next
				      + offsetof(struct tdb_free_record,
						 magic_and_prev),
				      off | magic);
		if (ecode != TDB_SUCCESS) {
			return ecode;
		}

#ifdef CCAN_TDB2_DEBUG
		prev = tdb_read_off(tdb, frec_prev(&new)
				    + offsetof(struct tdb_free_record, next));
		if (prev != 0) {
			return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
					  "enqueue_in_free:"
					  " %llu bad tail next ptr %llu",
					  (long long)frec_prev(&new)
					  + offsetof(struct tdb_free_record,
						     next),
					  (long long)prev);
		}
#endif
	}

	/* Update enqueue count, but don't set high bit: see TDB_OFF_IS_ERR */
	if (*coalesce)
		head += (1ULL << (64 - TDB_OFF_UPPER_STEAL));
	head &= ~(TDB_OFF_MASK | (1ULL << 63));
	head |= off;

	ecode = tdb_write_off(tdb, b_off, head);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	/* It's time to coalesce if counter wrapped. */
	if (*coalesce)
		*coalesce = ((head & ~TDB_OFF_MASK) == 0);

	return tdb_write_convert(tdb, off, &new, sizeof(new));
}

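/*
 * Sketch of the counter trick (assuming TDB_OFF_UPPER_STEAL is 8): the top
 * 8 bits of the bucket head are stolen from the offset, and bit 63 is kept
 * clear so the head can never look like an error (see TDB_OFF_IS_ERR).
 * That leaves a 7-bit enqueue counter:
 *
 *	head = [ 0 | 7-bit count | offset of first free record ]
 *
 * Each permitted enqueue adds 1 to the count; when it wraps to zero, after
 * 128 additions, *coalesce is set and the caller runs coalesce_list().
 */
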
static tdb_off_t ftable_offset(struct tdb_context *tdb, unsigned int ftable)
{
	tdb_off_t off;
	unsigned int i;

	if (likely(tdb->ftable == ftable))
		return tdb->ftable_off;

	off = first_ftable(tdb);
	for (i = 0; i < ftable; i++) {
		if (TDB_OFF_IS_ERR(off)) {
			break;
		}
		off = next_ftable(tdb, off);
	}
	return off;
}

/* Note: we unlock the current bucket if fail (-ve), or coalesce (+ve) and
 * need to blatt the *protect record (which is set to an error). */
static tdb_len_t coalesce(struct tdb_context *tdb,
			  tdb_off_t off, tdb_off_t b_off,
			  tdb_len_t data_len,
			  tdb_off_t *protect)
{
	tdb_off_t end;
	struct tdb_free_record rec;
	enum TDB_ERROR ecode;

	tdb->stats.alloc_coalesce_tried++;
	end = off + sizeof(struct tdb_used_record) + data_len;

	while (end < tdb->file->map_size) {
		const struct tdb_free_record *r;
		tdb_off_t nb_off;
		unsigned ftable, bucket;

		r = tdb_access_read(tdb, end, sizeof(*r), true);
		if (TDB_PTR_IS_ERR(r)) {
			ecode = TDB_PTR_ERR(r);
			goto err;
		}

		if (frec_magic(r) != TDB_FREE_MAGIC
		    || frec_ftable(r) == TDB_FTABLE_NONE) {
			tdb_access_release(tdb, r);
			break;
		}

		ftable = frec_ftable(r);
		bucket = size_to_bucket(frec_len(r));
		nb_off = ftable_offset(tdb, ftable);
		if (TDB_OFF_IS_ERR(nb_off)) {
			tdb_access_release(tdb, r);
			ecode = nb_off;
			goto err;
		}
		nb_off = bucket_off(nb_off, bucket);
		tdb_access_release(tdb, r);

		/* We may be violating lock order here, so best effort. */
		if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT)
		    != TDB_SUCCESS) {
			tdb->stats.alloc_coalesce_lockfail++;
			break;
		}

		/* Now we have lock, re-check. */
		ecode = tdb_read_convert(tdb, end, &rec, sizeof(rec));
		if (ecode != TDB_SUCCESS) {
			tdb_unlock_free_bucket(tdb, nb_off);
			goto err;
		}

		if (unlikely(frec_magic(&rec) != TDB_FREE_MAGIC)) {
			tdb->stats.alloc_coalesce_race++;
			tdb_unlock_free_bucket(tdb, nb_off);
			break;
		}

		if (unlikely(frec_ftable(&rec) != ftable)
		    || unlikely(size_to_bucket(frec_len(&rec)) != bucket)) {
			tdb->stats.alloc_coalesce_race++;
			tdb_unlock_free_bucket(tdb, nb_off);
			break;
		}

		/* Did we just mess up a record you were hoping to use? */
		if (end == *protect)
			*protect = TDB_ERR_NOEXIST;

		ecode = remove_from_list(tdb, nb_off, end, &rec);
		check_list(tdb, nb_off);
		if (ecode != TDB_SUCCESS) {
			tdb_unlock_free_bucket(tdb, nb_off);
			goto err;
		}

		end += sizeof(struct tdb_used_record) + frec_len(&rec);
		tdb_unlock_free_bucket(tdb, nb_off);
		tdb->stats.alloc_coalesce_num_merged++;
	}

	/* Didn't find any adjacent free? */
	if (end == off + sizeof(struct tdb_used_record) + data_len)
		return 0;

	/* Before we expand, check this isn't one you wanted protected? */
	if (off == *protect)
		*protect = TDB_ERR_EXISTS;

	/* OK, expand initial record */
	ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
	if (ecode != TDB_SUCCESS) {
		goto err;
	}

	if (frec_len(&rec) != data_len) {
		ecode = tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
				   "coalesce: expected data len %zu not %zu",
				   (size_t)data_len, (size_t)frec_len(&rec));
		goto err;
	}

	ecode = remove_from_list(tdb, b_off, off, &rec);
	check_list(tdb, b_off);
	if (ecode != TDB_SUCCESS) {
		goto err;
	}

	/* Try locking violation first.  We don't allow coalesce recursion! */
	ecode = add_free_record(tdb, off, end - off, TDB_LOCK_NOWAIT, false);
	if (ecode != TDB_SUCCESS) {
		/* Need to drop lock.  Can't rely on anything stable. */
		*protect = TDB_ERR_CORRUPT;

		/* We have to drop this to avoid deadlocks, so make sure record
		 * doesn't get coalesced by someone else! */
		rec.ftable_and_len = ((uint64_t)TDB_FTABLE_NONE
				      << (64 - TDB_OFF_UPPER_STEAL))
			| (end - off - sizeof(struct tdb_used_record));
		ecode = tdb_write_off(tdb,
				      off + offsetof(struct tdb_free_record,
						     ftable_and_len),
				      rec.ftable_and_len);
		if (ecode != TDB_SUCCESS) {
			goto err;
		}

		tdb->stats.alloc_coalesce_succeeded++;
		tdb_unlock_free_bucket(tdb, b_off);

		ecode = add_free_record(tdb, off, end - off, TDB_LOCK_WAIT,
					false);
		if (ecode != TDB_SUCCESS) {
			return ecode;
		}
	} else if (TDB_OFF_IS_ERR(*protect)) {
		/* For simplicity, we always drop lock if they can't continue */
		tdb_unlock_free_bucket(tdb, b_off);
	}

	/* Return usable length. */
	return end - off - sizeof(struct tdb_used_record);

err:
	/* To unify error paths, we *always* unlock bucket on error. */
	tdb_unlock_free_bucket(tdb, b_off);
	return ecode;
}

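/*
 * Example (illustrative): suppose record A was just freed and record B,
 * immediately after it in the file, is also free:
 *
 *	... [hdr A | 32 bytes free] [hdr B | 64 bytes free] ...
 *
 * coalesce() removes B from its bucket, advances end past B, removes A
 * from ours, and re-adds a single free record covering both, so the usable
 * length returned is 32 + sizeof(struct tdb_used_record) + 64.
 */
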
/* List is locked: we unlock it. */
static enum TDB_ERROR coalesce_list(struct tdb_context *tdb,
				    tdb_off_t ftable_off, tdb_off_t b_off)
{
	enum TDB_ERROR ecode;
	tdb_off_t off;

	off = tdb_read_off(tdb, b_off);
	if (TDB_OFF_IS_ERR(off)) {
		ecode = off;
		goto unlock_err;
	}
	/* A little bit of paranoia: strip the counter bits. */
	off &= TDB_OFF_MASK;

	while (off) {
		struct tdb_free_record rec;
		tdb_len_t coal;
		tdb_off_t next;

		ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
		if (ecode != TDB_SUCCESS)
			goto unlock_err;

		next = rec.next;
		coal = coalesce(tdb, off, b_off, frec_len(&rec), &next);
		if (TDB_OFF_IS_ERR(coal)) {
			/* This has already unlocked on error. */
			return coal;
		}
		if (TDB_OFF_IS_ERR(next)) {
			/* Coalescing had to unlock, so stop. */
			return TDB_SUCCESS;
		}
		off = next;
	}

	tdb_unlock_free_bucket(tdb, b_off);
	return TDB_SUCCESS;

unlock_err:
	tdb_unlock_free_bucket(tdb, b_off);
	return ecode;
}

/* List must not be locked if coalesce is set. */
enum TDB_ERROR add_free_record(struct tdb_context *tdb,
			       tdb_off_t off, tdb_len_t len_with_header,
			       enum tdb_lock_flags waitflag,
			       bool coalesce)
{
	tdb_off_t b_off;
	tdb_len_t len;
	enum TDB_ERROR ecode;

	assert(len_with_header >= sizeof(struct tdb_free_record));

	len = len_with_header - sizeof(struct tdb_used_record);

	b_off = bucket_off(tdb->ftable_off, size_to_bucket(len));
	ecode = tdb_lock_free_bucket(tdb, b_off, waitflag);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	ecode = enqueue_in_free(tdb, b_off, off, len, &coalesce);
	check_list(tdb, b_off);

	/* Coalescing unlocks free list. */
	if (!ecode && coalesce)
		ecode = coalesce_list(tdb, tdb->ftable_off, b_off);
	else
		tdb_unlock_free_bucket(tdb, b_off);
	return ecode;
}

static size_t adjust_size(size_t keylen, size_t datalen)
{
	size_t size = keylen + datalen;

	if (size < TDB_MIN_DATA_LEN)
		size = TDB_MIN_DATA_LEN;

	/* Round to next uint64_t boundary. */
	return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}

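/*
 * Worked example (illustrative, assuming TDB_MIN_DATA_LEN is 8):
 *
 *	adjust_size(3, 10): 13 -> (13 + 7) & ~7 == 16
 *	adjust_size(0, 2):  2  -> clamped to 8, already aligned == 8
 *
 * Every allocation is thus a multiple of 8 bytes, keeping the uint64_t
 * fields of records naturally aligned.
 */
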
/* If we have enough left over to be useful, split that off. */
static size_t record_leftover(size_t keylen, size_t datalen,
			      bool want_extra, size_t total_len)
{
	ssize_t leftover;

	if (want_extra)
		datalen += datalen / 2;
	leftover = total_len - adjust_size(keylen, datalen);

	if (leftover < (ssize_t)sizeof(struct tdb_free_record))
		return 0;

	return leftover;
}

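/*
 * Example (illustrative): given a 256-byte free record, keylen 8, datalen 32
 * and want_extra set, we budget for 8 + 48 == 56 bytes (already 8-aligned),
 * leaving 200.  That comfortably exceeds sizeof(struct tdb_free_record),
 * so the tail is split off and re-added to a free list rather than being
 * left as padding.
 */
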
/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
				tdb_off_t ftable_off,
				tdb_off_t bucket,
				size_t keylen, size_t datalen,
				bool want_extra,
				unsigned magic,
				unsigned hashlow)
{
	tdb_off_t off, b_off, best_off;
	struct tdb_free_record best = { 0 };
	double multiplier;
	size_t size = adjust_size(keylen, datalen);
	enum TDB_ERROR ecode;

	tdb->stats.allocs++;
	b_off = bucket_off(ftable_off, bucket);

	/* FIXME: Try non-blocking wait first, to measure contention. */
	/* Lock this bucket. */
	ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	best.ftable_and_len = -1ULL;
	best_off = 0;

	/* Get slack if we're after extra. */
	if (want_extra)
		multiplier = 1.5;
	else
		multiplier = 1.0;

	/* Walk the list to see if any are large enough, getting less fussy
	 * as we go. */
	off = tdb_read_off(tdb, b_off);
	if (TDB_OFF_IS_ERR(off)) {
		ecode = off;
		goto unlock_err;
	}
	off &= TDB_OFF_MASK;

	while (off) {
		const struct tdb_free_record *r;
		tdb_off_t next;

		r = tdb_access_read(tdb, off, sizeof(*r), true);
		if (TDB_PTR_IS_ERR(r)) {
			ecode = TDB_PTR_ERR(r);
			goto unlock_err;
		}

		if (frec_magic(r) != TDB_FREE_MAGIC) {
			ecode = tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
					   "lock_and_alloc:"
					   " %llu non-free 0x%llx",
					   (long long)off,
					   (long long)r->magic_and_prev);
			tdb_access_release(tdb, r);
			goto unlock_err;
		}

		if (frec_len(r) >= size && frec_len(r) < frec_len(&best)) {
			best_off = off;
			best = *r;
		}

		if (frec_len(&best) <= size * multiplier && best_off) {
			tdb_access_release(tdb, r);
			break;
		}

		multiplier *= 1.01;

		next = r->next;
		tdb_access_release(tdb, r);
		off = next;
	}

	/* If we found anything at all, use it. */
	if (best_off) {
		struct tdb_used_record rec;
		size_t leftover;

		/* We're happy with this size: take it. */
		ecode = remove_from_list(tdb, b_off, best_off, &best);
		check_list(tdb, b_off);
		if (ecode != TDB_SUCCESS) {
			goto unlock_err;
		}

		leftover = record_leftover(keylen, datalen, want_extra,
					   frec_len(&best));

		assert(keylen + datalen + leftover <= frec_len(&best));
		/* We need to mark non-free before we drop lock, otherwise
		 * coalesce() could try to merge it! */
		ecode = set_header(tdb, &rec, magic, keylen, datalen,
				   frec_len(&best) - leftover, hashlow);
		if (ecode != TDB_SUCCESS) {
			goto unlock_err;
		}

		ecode = tdb_write_convert(tdb, best_off, &rec, sizeof(rec));
		if (ecode != TDB_SUCCESS) {
			goto unlock_err;
		}

		/* For futureproofing, we put a 0 in any unused space. */
		if (rec_extra_padding(&rec)) {
			ecode = tdb->methods->twrite(tdb, best_off + sizeof(rec)
						     + keylen + datalen, "", 1);
			if (ecode != TDB_SUCCESS) {
				goto unlock_err;
			}
		}

		/* Bucket of leftover will be <= current bucket, so nested
		 * locking is allowed. */
		if (leftover) {
			tdb->stats.alloc_leftover++;
			ecode = add_free_record(tdb,
						best_off + sizeof(rec)
						+ frec_len(&best) - leftover,
						leftover, TDB_LOCK_WAIT, false);
			if (ecode != TDB_SUCCESS) {
				best_off = ecode;
			}
		}
		tdb_unlock_free_bucket(tdb, b_off);

		return best_off;
	}

	tdb_unlock_free_bucket(tdb, b_off);
	return 0;

unlock_err:
	tdb_unlock_free_bucket(tdb, b_off);
	return ecode;
}

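/*
 * The multiplier walk above is a bounded best-fit: we take the smallest
 * record found so far once it is within multiplier x the needed size
 * (1.5x when want_extra, else exact), and relax that bound by 1% per
 * record examined so a long list cannot make allocation arbitrarily slow.
 * E.g. needing 100 bytes with want_extra, we accept a best fit <= 150
 * immediately, <= ~151.5 after one more hop, and so on.
 */
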
/* Get a free block from current free list, or 0 if none, -ve on error. */
static tdb_off_t get_free(struct tdb_context *tdb,
			  size_t keylen, size_t datalen, bool want_extra,
			  unsigned magic, unsigned hashlow)
{
	tdb_off_t off, ftable_off;
	tdb_off_t start_b, b, ftable;
	bool wrapped = false;

	/* If they are growing, add 50% to get to higher bucket. */
	if (want_extra)
		start_b = size_to_bucket(adjust_size(keylen,
						     datalen + datalen / 2));
	else
		start_b = size_to_bucket(adjust_size(keylen, datalen));

	ftable_off = tdb->ftable_off;
	ftable = tdb->ftable;
	while (!wrapped || ftable_off != tdb->ftable_off) {
		/* Start at exact size bucket, and search up... */
		for (b = find_free_head(tdb, ftable_off, start_b);
		     b < TDB_FREE_BUCKETS;
		     b = find_free_head(tdb, ftable_off, b + 1)) {
			/* Try getting one from list. */
			off = lock_and_alloc(tdb, ftable_off,
					     b, keylen, datalen, want_extra,
					     magic, hashlow);
			if (TDB_OFF_IS_ERR(off))
				return off;
			if (off != 0) {
				if (b == start_b)
					tdb->stats.alloc_bucket_exact++;
				if (b == TDB_FREE_BUCKETS - 1)
					tdb->stats.alloc_bucket_max++;
				/* Worked?  Stay using this list. */
				tdb->ftable_off = ftable_off;
				tdb->ftable = ftable;
				return off;
			}
			/* Didn't work.  Try next bucket. */
		}

		if (TDB_OFF_IS_ERR(b)) {
			return b;
		}

		/* Hmm, try next table. */
		ftable_off = next_ftable(tdb, ftable_off);
		if (TDB_OFF_IS_ERR(ftable_off)) {
			return ftable_off;
		}
		ftable++;

		if (ftable_off == 0) {
			wrapped = true;
			ftable_off = first_ftable(tdb);
			if (TDB_OFF_IS_ERR(ftable_off)) {
				return ftable_off;
			}
			ftable = 0;
		}
	}

	return 0;
}

enum TDB_ERROR set_header(struct tdb_context *tdb,
			  struct tdb_used_record *rec,
			  unsigned magic, uint64_t keylen, uint64_t datalen,
			  uint64_t actuallen, unsigned hashlow)
{
	uint64_t keybits = (fls64(keylen) + 1) / 2;

	/* Use bottom bits of hash, so it's independent of hash table size. */
	rec->magic_and_meta = (hashlow & ((1 << 11)-1))
		| ((actuallen - (keylen + datalen)) << 11)
		| (keybits << 43)
		| ((uint64_t)magic << 48);
	rec->key_and_data_len = (keylen | (datalen << (keybits*2)));

	/* Encoding can fail on big values. */
	if (rec_key_length(rec) != keylen
	    || rec_data_length(rec) != datalen
	    || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
		return tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
				  "Could not encode k=%llu,d=%llu,a=%llu",
				  (long long)keylen, (long long)datalen,
				  (long long)actuallen);
	}
	return TDB_SUCCESS;
}

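/*
 * Packing example (illustrative): keylen == 10 has fls64(10) == 4, so
 * keybits == (4 + 1) / 2 == 2, and the key length occupies the low
 * keybits*2 == 4 bits of key_and_data_len, with datalen shifted left by 4.
 * Rounding the key's bit-width up to an even number keeps keybits small
 * enough to stash in magic_and_meta; the read-back check above catches any
 * combination that does not survive the round trip.
 */
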
/* Expand the database. */
static enum TDB_ERROR tdb_expand(struct tdb_context *tdb, tdb_len_t size)
{
	uint64_t old_size, rec_size, map_size;
	tdb_len_t wanted;
	enum TDB_ERROR ecode;

	/* Need to hold a hash lock to expand DB: transactions rely on it. */
	if (!(tdb->flags & TDB_NOLOCK)
	    && !tdb->file->allrecord_lock.count && !tdb_has_hash_locks(tdb)) {
		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
				  "tdb_expand: must hold lock during expand");
	}

	/* Only one person can expand file at a time. */
	ecode = tdb_lock_expand(tdb, F_WRLCK);
	if (ecode != TDB_SUCCESS) {
		return ecode;
	}

	/* Someone else may have expanded the file, so retry. */
	old_size = tdb->file->map_size;
	tdb->methods->oob(tdb, tdb->file->map_size + 1, true);
	if (tdb->file->map_size != old_size) {
		tdb_unlock_expand(tdb, F_WRLCK);
		return TDB_SUCCESS;
	}

	/* Limit size to avoid using up huge amounts of memory for in-memory
	 * tdbs if an oddball huge record creeps in. */
	if (size > 100 * 1024) {
		rec_size = size * 2;
	} else {
		rec_size = size * 100;
	}

	/* Always make room for at least rec_size more records, and at
	 * least 25% more space if the DB is smaller than 100MiB;
	 * otherwise grow it by 10% only. */
	if (old_size > 100 * 1024 * 1024) {
		map_size = old_size / 10;
	} else {
		map_size = old_size / 4;
	}

	if (map_size > rec_size) {
		wanted = map_size;
	} else {
		wanted = rec_size;
	}

	/* We need room for the record header too. */
	wanted = adjust_size(0, sizeof(struct tdb_used_record) + wanted);

	ecode = tdb->methods->expand_file(tdb, wanted);
	if (ecode != TDB_SUCCESS) {
		tdb_unlock_expand(tdb, F_WRLCK);
		return ecode;
	}

	/* We need to drop this lock before adding free record. */
	tdb_unlock_expand(tdb, F_WRLCK);

	tdb->stats.expands++;
	return add_free_record(tdb, old_size, wanted, TDB_LOCK_WAIT, true);
}

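/*
 * Growth example (illustrative): a 10MiB file asked to expand for a 1KiB
 * record computes rec_size = 100KiB (1KiB <= 100KiB, so size * 100) and
 * map_size = 2.5MiB (old_size / 4), takes the larger, and so grows the
 * file by about 2.5MiB (plus header and rounding) in one expand_file()
 * call, with the whole new region added as a single free record.
 */
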
/* This won't fail: it will expand the database if it has to. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
		uint64_t hash, unsigned magic, bool growing)
{
	tdb_off_t off;

	/* We can't hold pointers during this: we could unmap! */
	assert(!tdb->direct_access);

	for (;;) {
		enum TDB_ERROR ecode;
		off = get_free(tdb, keylen, datalen, growing, magic, hash);
		if (likely(off != 0))
			break;

		ecode = tdb_expand(tdb, adjust_size(keylen, datalen));
		if (ecode != TDB_SUCCESS) {
			return ecode;
		}
	}

	return off;
}