897d3cbe4627eaf69c9bf985a53608741b045fe5
[ccan] / ccan / tdb2 / free.c
1  /*
2    Trivial Database 2: free list/block handling
3    Copyright (C) Rusty Russell 2010
4
5    This library is free software; you can redistribute it and/or
6    modify it under the terms of the GNU Lesser General Public
7    License as published by the Free Software Foundation; either
8    version 3 of the License, or (at your option) any later version.
9
10    This library is distributed in the hope that it will be useful,
11    but WITHOUT ANY WARRANTY; without even the implied warranty of
12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13    Lesser General Public License for more details.
14
15    You should have received a copy of the GNU Lesser General Public
16    License along with this library; if not, see <http://www.gnu.org/licenses/>.
17 */
18 #include "private.h"
19 #include <ccan/likely/likely.h>
20 #include <ccan/ilog/ilog.h>
21 #include <time.h>
22 #include <assert.h>
23 #include <limits.h>
24
/* Index of the most significant set bit: local alias for ilog64(). */
static unsigned fls64(uint64_t val)
{
	unsigned highest = ilog64(val);
	return highest;
}
29
30 /* In which bucket would we find a particular record size? (ignoring header) */
31 unsigned int size_to_bucket(tdb_len_t data_len)
32 {
33         unsigned int bucket;
34
35         /* We can't have records smaller than this. */
36         assert(data_len >= TDB_MIN_DATA_LEN);
37
38         /* Ignoring the header... */
39         if (data_len - TDB_MIN_DATA_LEN <= 64) {
40                 /* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
41                 bucket = (data_len - TDB_MIN_DATA_LEN) / 8;
42         } else {
43                 /* After that we go power of 2. */
44                 bucket = fls64(data_len - TDB_MIN_DATA_LEN) + 2;
45         }
46
47         if (unlikely(bucket >= TDB_FREE_BUCKETS))
48                 bucket = TDB_FREE_BUCKETS - 1;
49         return bucket;
50 }
51
52 tdb_off_t first_ftable(struct tdb_context *tdb)
53 {
54         return tdb_read_off(tdb, offsetof(struct tdb_header, free_table));
55 }
56
57 tdb_off_t next_ftable(struct tdb_context *tdb, tdb_off_t ftable)
58 {
59         return tdb_read_off(tdb, ftable + offsetof(struct tdb_freetable,next));
60 }
61
62 int tdb_ftable_init(struct tdb_context *tdb)
63 {
64         /* Use reservoir sampling algorithm to select a free list at random. */
65         unsigned int rnd, max = 0, count = 0;
66         tdb_off_t off;
67
68         tdb->ftable_off = off = first_ftable(tdb);
69         tdb->ftable = 0;
70
71         while (off) {
72                 if (TDB_OFF_IS_ERR(off)) {
73                         tdb->ecode = off;
74                         return -1;
75                 }
76
77                 rnd = random();
78                 if (rnd >= max) {
79                         tdb->ftable_off = off;
80                         tdb->ftable = count;
81                         max = rnd;
82                 }
83
84                 off = next_ftable(tdb, off);
85                 count++;
86         }
87         return 0;
88 }
89
90 /* Offset of a given bucket. */
91 tdb_off_t bucket_off(tdb_off_t ftable_off, unsigned bucket)
92 {
93         return ftable_off + offsetof(struct tdb_freetable, buckets)
94                 + bucket * sizeof(tdb_off_t);
95 }
96
97 /* Returns free_buckets + 1, or list number to search, or -ve error. */
98 static tdb_off_t find_free_head(struct tdb_context *tdb,
99                                 tdb_off_t ftable_off,
100                                 tdb_off_t bucket)
101 {
102         /* Speculatively search for a non-zero bucket. */
103         return tdb_find_nonzero_off(tdb, bucket_off(ftable_off, 0),
104                                     bucket, TDB_FREE_BUCKETS);
105 }
106
107 /* Remove from free bucket. */
108 static int remove_from_list(struct tdb_context *tdb,
109                             tdb_off_t b_off, tdb_off_t r_off,
110                             const struct tdb_free_record *r)
111 {
112         tdb_off_t off;
113         enum TDB_ERROR ecode;
114
115         /* Front of list? */
116         if (frec_prev(r) == 0) {
117                 off = b_off;
118         } else {
119                 off = frec_prev(r) + offsetof(struct tdb_free_record, next);
120         }
121
122 #ifdef CCAN_TDB2_DEBUG
123         if (tdb_read_off(tdb, off) != r_off) {
124                 tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
125                          "remove_from_list: %llu bad prev in list %llu",
126                          (long long)r_off, (long long)b_off);
127                 return -1;
128         }
129 #endif
130
131         /* r->prev->next = r->next */
132         ecode = tdb_write_off(tdb, off, r->next);
133         if (ecode != TDB_SUCCESS) {
134                 tdb->ecode = ecode;
135                 return -1;
136         }
137
138         if (r->next != 0) {
139                 off = r->next + offsetof(struct tdb_free_record,magic_and_prev);
140                 /* r->next->prev = r->prev */
141
142 #ifdef CCAN_TDB2_DEBUG
143                 if (tdb_read_off(tdb, off) & TDB_OFF_MASK != r_off) {
144                         tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
145                                    "remove_from_list: %llu bad list %llu",
146                                    (long long)r_off, (long long)b_off);
147                         return -1;
148                 }
149 #endif
150
151                 ecode = tdb_write_off(tdb, off, r->magic_and_prev);
152                 if (ecode != TDB_SUCCESS) {
153                         tdb->ecode = ecode;
154                         return -1;
155                 }
156         }
157         return 0;
158 }
159
/* Enqueue in this free bucket.
 *
 * Links the record at "off" (free length "len") onto the front of the
 * doubly-linked list whose head lives at b_off.  Caller must hold the
 * bucket lock.  Returns 0 on success, -1 on error (tdb->ecode set).
 *
 * NOTE(review): the record's own header is written last, after the head
 * and neighbour pointers are updated — presumably so the record only
 * becomes a fully-formed list member once it is reachable; confirm
 * against the tdb2 design notes before relying on this ordering. */
static int enqueue_in_free(struct tdb_context *tdb,
			   tdb_off_t b_off,
			   tdb_off_t off,
			   tdb_len_t len)
{
	struct tdb_free_record new;
	enum TDB_ERROR ecode;
	/* TDB_FREE_MAGIC in the top TDB_OFF_UPPER_STEAL bits marks a free
	 * record; the remaining bits hold the "prev" pointer (0 here). */
	uint64_t magic = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL));

	/* ftable_and_len: owning free-table index in the top bits, free
	 * length in the rest. */
	new.ftable_and_len = ((uint64_t)tdb->ftable << (64 - TDB_OFF_UPPER_STEAL))
		| len;
	/* prev = 0: we are becoming the head of the list. */
	new.magic_and_prev = magic;

	/* new->next = head. */
	new.next = tdb_read_off(tdb, b_off);
	if (TDB_OFF_IS_ERR(new.next)) {
		tdb->ecode = new.next;
		return -1;
	}

	if (new.next) {
#ifdef CCAN_TDB2_DEBUG
		/* The old head must have prev == 0, i.e. bare magic. */
		if (tdb_read_off(tdb,
				 new.next + offsetof(struct tdb_free_record,
						     magic_and_prev))
		    != magic) {
			tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
				   "enqueue_in_free: %llu bad head"
				   " prev %llu",
				   (long long)new.next, (long long)b_off);
			return -1;
		}
#endif
		/* next->prev = new. */
		ecode = tdb_write_off(tdb, new.next
				      + offsetof(struct tdb_free_record,
						 magic_and_prev),
				      off | magic);
		if (ecode != TDB_SUCCESS) {
			tdb->ecode = ecode;
			return -1;
		}
	}
	/* head = new */
	ecode = tdb_write_off(tdb, b_off, off);
	if (ecode != TDB_SUCCESS) {
		tdb->ecode = ecode;
		return -1;
	}

	/* Finally, write the new record's own header. */
	ecode = tdb_write_convert(tdb, off, &new, sizeof(new));
	if (ecode != TDB_SUCCESS) {
		tdb->ecode = ecode;
		return -1;
	}
	return 0;
}
220
221 /* List need not be locked. */
222 int add_free_record(struct tdb_context *tdb,
223                     tdb_off_t off, tdb_len_t len_with_header)
224 {
225         tdb_off_t b_off;
226         tdb_len_t len;
227         int ret;
228         enum TDB_ERROR ecode;
229
230         assert(len_with_header >= sizeof(struct tdb_free_record));
231
232         len = len_with_header - sizeof(struct tdb_used_record);
233
234         b_off = bucket_off(tdb->ftable_off, size_to_bucket(len));
235         ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
236         if (ecode != TDB_SUCCESS) {
237                 tdb->ecode = ecode;
238                 return -1;
239         }
240
241         ret = enqueue_in_free(tdb, b_off, off, len);
242         tdb_unlock_free_bucket(tdb, b_off);
243         return ret;
244 }
245
246 static size_t adjust_size(size_t keylen, size_t datalen)
247 {
248         size_t size = keylen + datalen;
249
250         if (size < TDB_MIN_DATA_LEN)
251                 size = TDB_MIN_DATA_LEN;
252
253         /* Round to next uint64_t boundary. */
254         return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
255 }
256
257 /* If we have enough left over to be useful, split that off. */
258 static size_t record_leftover(size_t keylen, size_t datalen,
259                               bool want_extra, size_t total_len)
260 {
261         ssize_t leftover;
262
263         if (want_extra)
264                 datalen += datalen / 2;
265         leftover = total_len - adjust_size(keylen, datalen);
266
267         if (leftover < (ssize_t)sizeof(struct tdb_free_record))
268                 return 0;
269
270         return leftover;
271 }
272
273 static tdb_off_t ftable_offset(struct tdb_context *tdb, unsigned int ftable)
274 {
275         tdb_off_t off;
276         unsigned int i;
277
278         if (likely(tdb->ftable == ftable))
279                 return tdb->ftable_off;
280
281         off = first_ftable(tdb);
282         for (i = 0; i < ftable; i++) {
283                 if (TDB_OFF_IS_ERR(off)) {
284                         tdb->ecode = off;
285                         break;
286                 }
287                 off = next_ftable(tdb, off);
288         }
289         return off;
290 }
291
/* Try to merge the free record at "off" (free length data_len) with any
 * free records physically following it, to reduce fragmentation.
 *
 * Caller holds the bucket lock b_off covering "off"'s bucket.
 * Note: we unlock the current bucket if we coalesce or fail.
 * Returns 1 if we coalesced (b_off now unlocked), 0 if not (b_off still
 * held), -1 on error (b_off unlocked, tdb->ecode set). */
static int coalesce(struct tdb_context *tdb,
		    tdb_off_t off, tdb_off_t b_off, tdb_len_t data_len)
{
	tdb_off_t end;
	struct tdb_free_record rec;
	enum TDB_ERROR ecode;

	add_stat(tdb, alloc_coalesce_tried, 1);
	/* First byte past our record. */
	end = off + sizeof(struct tdb_used_record) + data_len;

	while (end < tdb->map_size) {
		const struct tdb_free_record *r;
		tdb_off_t nb_off;
		unsigned ftable, bucket;

		/* Peek at the neighbouring record (its bucket is not yet
		 * locked, so this read is racy and re-checked below). */
		r = tdb_access_read(tdb, end, sizeof(*r), true);
		if (TDB_PTR_IS_ERR(r)) {
			tdb->ecode = TDB_PTR_ERR(r);
			goto err;
		}

		/* Not free, or mid-coalesce by someone else (marked
		 * TDB_FTABLE_NONE)?  Stop merging here. */
		if (frec_magic(r) != TDB_FREE_MAGIC
		    || frec_ftable(r) == TDB_FTABLE_NONE) {
			tdb_access_release(tdb, r);
			break;
		}

		ftable = frec_ftable(r);
		bucket = size_to_bucket(frec_len(r));
		nb_off = bucket_off(ftable_offset(tdb, ftable), bucket);
		tdb_access_release(tdb, r);

		/* We may be violating lock order here, so best effort. */
		if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT)
		    != TDB_SUCCESS) {
			add_stat(tdb, alloc_coalesce_lockfail, 1);
			break;
		}

		/* Now we have lock, re-check. */
		ecode = tdb_read_convert(tdb, end, &rec, sizeof(rec));
		if (ecode != TDB_SUCCESS) {
			tdb->ecode = ecode;
			tdb_unlock_free_bucket(tdb, nb_off);
			goto err;
		}

		/* Raced: it's no longer a free record. */
		if (unlikely(frec_magic(&rec) != TDB_FREE_MAGIC)) {
			add_stat(tdb, alloc_coalesce_race, 1);
			tdb_unlock_free_bucket(tdb, nb_off);
			break;
		}

		/* Raced: it moved to a different table/bucket than the one
		 * we locked. */
		if (unlikely(frec_ftable(&rec) != ftable)
		    || unlikely(size_to_bucket(frec_len(&rec)) != bucket)) {
			add_stat(tdb, alloc_coalesce_race, 1);
			tdb_unlock_free_bucket(tdb, nb_off);
			break;
		}

		/* Take the neighbour off its list; we absorb its space. */
		if (remove_from_list(tdb, nb_off, end, &rec) == -1) {
			tdb_unlock_free_bucket(tdb, nb_off);
			goto err;
		}

		end += sizeof(struct tdb_used_record) + frec_len(&rec);
		tdb_unlock_free_bucket(tdb, nb_off);
		add_stat(tdb, alloc_coalesce_num_merged, 1);
	}

	/* Didn't find any adjacent free? */
	if (end == off + sizeof(struct tdb_used_record) + data_len)
		return 0;

	/* OK, expand initial record */
	ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
	if (ecode != TDB_SUCCESS) {
		tdb->ecode = ecode;
		goto err;
	}

	/* Our own record changed length under the bucket lock we hold:
	 * that's corruption. */
	if (frec_len(&rec) != data_len) {
		tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
			   "coalesce: expected data len %zu not %zu",
			   (size_t)data_len, (size_t)frec_len(&rec));
		goto err;
	}

	if (remove_from_list(tdb, b_off, off, &rec) == -1)
		goto err;

	/* We have to drop this to avoid deadlocks, so make sure record
	 * doesn't get coalesced by someone else! */
	rec.ftable_and_len = (TDB_FTABLE_NONE << (64 - TDB_OFF_UPPER_STEAL))
		| (end - off - sizeof(struct tdb_used_record));
	ecode = tdb_write_off(tdb, off + offsetof(struct tdb_free_record,
						  ftable_and_len),
			      rec.ftable_and_len);
	if (ecode != TDB_SUCCESS) {
		tdb->ecode = ecode;
		goto err;
	}

	add_stat(tdb, alloc_coalesce_succeeded, 1);
	tdb_unlock_free_bucket(tdb, b_off);

	/* Re-file the enlarged record (possibly in a bigger bucket). */
	if (add_free_record(tdb, off, end - off) == -1)
		return -1;
	return 1;

err:
	/* To unify error paths, we *always* unlock bucket on error. */
	tdb_unlock_free_bucket(tdb, b_off);
	return -1;
}
408
409 /* We need size bytes to put our key and data in. */
410 static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
411                                 tdb_off_t ftable_off,
412                                 tdb_off_t bucket,
413                                 size_t keylen, size_t datalen,
414                                 bool want_extra,
415                                 unsigned magic,
416                                 unsigned hashlow)
417 {
418         tdb_off_t off, b_off,best_off;
419         struct tdb_free_record best = { 0 };
420         double multiplier;
421         size_t size = adjust_size(keylen, datalen);
422         enum TDB_ERROR ecode;
423
424         add_stat(tdb, allocs, 1);
425 again:
426         b_off = bucket_off(ftable_off, bucket);
427
428         /* FIXME: Try non-blocking wait first, to measure contention. */
429         /* Lock this bucket. */
430         ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
431         if (ecode != TDB_SUCCESS) {
432                 tdb->ecode = ecode;
433                 return TDB_OFF_ERR;
434         }
435
436         best.ftable_and_len = -1ULL;
437         best_off = 0;
438
439         /* Get slack if we're after extra. */
440         if (want_extra)
441                 multiplier = 1.5;
442         else
443                 multiplier = 1.0;
444
445         /* Walk the list to see if any are large enough, getting less fussy
446          * as we go. */
447         off = tdb_read_off(tdb, b_off);
448         if (TDB_OFF_IS_ERR(off)) {
449                 tdb->ecode = off;
450                 goto unlock_err;
451         }
452
453         while (off) {
454                 const struct tdb_free_record *r;
455                 tdb_len_t len;
456                 tdb_off_t next;
457
458                 r = tdb_access_read(tdb, off, sizeof(*r), true);
459                 if (TDB_PTR_IS_ERR(r)) {
460                         tdb->ecode = TDB_PTR_ERR(r);
461                         goto unlock_err;
462                 }
463
464                 if (frec_magic(r) != TDB_FREE_MAGIC) {
465                         tdb_access_release(tdb, r);
466                         tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
467                                  "lock_and_alloc: %llu non-free 0x%llx",
468                                  (long long)off, (long long)r->magic_and_prev);
469                         goto unlock_err;
470                 }
471
472                 if (frec_len(r) >= size && frec_len(r) < frec_len(&best)) {
473                         best_off = off;
474                         best = *r;
475                 }
476
477                 if (frec_len(&best) < size * multiplier && best_off) {
478                         tdb_access_release(tdb, r);
479                         break;
480                 }
481
482                 multiplier *= 1.01;
483
484                 next = r->next;
485                 len = frec_len(r);
486                 tdb_access_release(tdb, r);
487
488                 /* Since we're going slow anyway, try coalescing here. */
489                 switch (coalesce(tdb, off, b_off, len)) {
490                 case -1:
491                         /* This has already unlocked on error. */
492                         return -1;
493                 case 1:
494                         /* This has unlocked list, restart. */
495                         goto again;
496                 }
497                 off = next;
498         }
499
500         /* If we found anything at all, use it. */
501         if (best_off) {
502                 struct tdb_used_record rec;
503                 size_t leftover;
504
505                 /* We're happy with this size: take it. */
506                 if (remove_from_list(tdb, b_off, best_off, &best) != 0)
507                         goto unlock_err;
508
509                 leftover = record_leftover(keylen, datalen, want_extra,
510                                            frec_len(&best));
511
512                 assert(keylen + datalen + leftover <= frec_len(&best));
513                 /* We need to mark non-free before we drop lock, otherwise
514                  * coalesce() could try to merge it! */
515                 if (set_header(tdb, &rec, magic, keylen, datalen,
516                                frec_len(&best) - leftover, hashlow) != 0)
517                         goto unlock_err;
518
519                 ecode = tdb_write_convert(tdb, best_off, &rec, sizeof(rec));
520                 if (ecode != TDB_SUCCESS) {
521                         tdb->ecode = ecode;
522                         goto unlock_err;
523                 }
524
525                 /* Bucket of leftover will be <= current bucket, so nested
526                  * locking is allowed. */
527                 if (leftover) {
528                         add_stat(tdb, alloc_leftover, 1);
529                         if (add_free_record(tdb,
530                                             best_off + sizeof(rec)
531                                             + frec_len(&best) - leftover,
532                                             leftover))
533                                 best_off = TDB_OFF_ERR;
534                 }
535                 tdb_unlock_free_bucket(tdb, b_off);
536
537                 return best_off;
538         }
539
540         tdb_unlock_free_bucket(tdb, b_off);
541         return 0;
542
543 unlock_err:
544         tdb_unlock_free_bucket(tdb, b_off);
545         return TDB_OFF_ERR;
546 }
547
/* Get a free block from current free list, or 0 if none.
 *
 * Searches buckets >= the exact-fit bucket in the current free table,
 * then walks subsequent tables (wrapping to the first exactly once).
 * Returns the allocated offset, TDB_OFF_ERR on allocation error, or 0
 * if nothing fits.  NOTE(review): some table-walk errors also return 0
 * after setting tdb->ecode — callers apparently treat that as "expand
 * and retry"; confirm that is intended. */
static tdb_off_t get_free(struct tdb_context *tdb,
			  size_t keylen, size_t datalen, bool want_extra,
			  unsigned magic, unsigned hashlow)
{
	tdb_off_t off, ftable_off;
	tdb_off_t start_b, b, ftable;
	bool wrapped = false;

	/* If they are growing, add 50% to get to higher bucket. */
	if (want_extra)
		start_b = size_to_bucket(adjust_size(keylen,
						     datalen + datalen / 2));
	else
		start_b = size_to_bucket(adjust_size(keylen, datalen));

	ftable_off = tdb->ftable_off;
	ftable = tdb->ftable;
	while (!wrapped || ftable_off != tdb->ftable_off) {
		/* Start at exact size bucket, and search up... */
		for (b = find_free_head(tdb, ftable_off, start_b);
		     b < TDB_FREE_BUCKETS;
		     b = find_free_head(tdb, ftable_off, b + 1)) {
			/* Try getting one from list. */
			off = lock_and_alloc(tdb, ftable_off,
					     b, keylen, datalen, want_extra,
					     magic, hashlow);
			if (off == TDB_OFF_ERR)
				return TDB_OFF_ERR;
			if (off != 0) {
				if (b == start_b)
					add_stat(tdb, alloc_bucket_exact, 1);
				if (b == TDB_FREE_BUCKETS - 1)
					add_stat(tdb, alloc_bucket_max, 1);
				/* Worked?  Stay using this list. */
				tdb->ftable_off = ftable_off;
				tdb->ftable = ftable;
				return off;
			}
			/* Didn't work.  Try next bucket. */
		}

		/* find_free_head can return an encoded error offset. */
		if (TDB_OFF_IS_ERR(b)) {
			tdb->ecode = b;
			return 0;
		}

		/* Hmm, try next table. */
		ftable_off = next_ftable(tdb, ftable_off);
		if (TDB_OFF_IS_ERR(ftable_off)) {
			tdb->ecode = ftable_off;
			return 0;
		}
		ftable++;

		/* End of chain: wrap to the first table, exactly once. */
		if (ftable_off == 0) {
			wrapped = true;
			ftable_off = first_ftable(tdb);
			if (TDB_OFF_IS_ERR(ftable_off)) {
				tdb->ecode = ftable_off;
				return 0;
			}
			ftable = 0;
		}
	}

	return 0;
}
616
/* Encode a used-record header into *rec.
 *
 * From the expressions below, magic_and_meta packs:
 *   bits  0-10: low 11 bits of the hash (table-size independent)
 *   bits 11-42: extra padding, i.e. actuallen - (keylen + datalen)
 *   bits 43-47: "keybits" — how key_and_data_len is split between
 *               key length (low 2*keybits bits) and data length
 *   bits 48-63: magic
 * Returns 0 on success, -1 (logging TDB_ERR_IO) if any value is too
 * large to survive the encoding round-trip. */
int set_header(struct tdb_context *tdb,
	       struct tdb_used_record *rec,
	       unsigned magic, uint64_t keylen, uint64_t datalen,
	       uint64_t actuallen, unsigned hashlow)
{
	/* Half the bit-width of keylen, rounded up. */
	uint64_t keybits = (fls64(keylen) + 1) / 2;

	/* Use bottom bits of hash, so it's independent of hash table size. */
	rec->magic_and_meta = (hashlow & ((1 << 11)-1))
		| ((actuallen - (keylen + datalen)) << 11)
		| (keybits << 43)
		| ((uint64_t)magic << 48);
	rec->key_and_data_len = (keylen | (datalen << (keybits*2)));

	/* Encoding can fail on big values: decode and verify round-trip. */
	if (rec_key_length(rec) != keylen
	    || rec_data_length(rec) != datalen
	    || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
		tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
			 "Could not encode k=%llu,d=%llu,a=%llu",
			 (long long)keylen, (long long)datalen,
			 (long long)actuallen);
		return -1;
	}
	return 0;
}
643
/* Expand the database.
 *
 * Grows the file by at least enough for "size" payload bytes (plus
 * header), then adds the new space as one big free record.
 * Returns 0 on success, -1 on error (tdb->ecode set). */
static int tdb_expand(struct tdb_context *tdb, tdb_len_t size)
{
	uint64_t old_size;
	tdb_len_t wanted;
	enum TDB_ERROR ecode;

	/* We need room for the record header too.
	 * NOTE(review): this value is unconditionally overwritten below,
	 * so the header allowance is dead — presumably harmless because
	 * TDB_EXTENSION_FACTOR makes the later value much larger, but
	 * worth confirming. */
	wanted = sizeof(struct tdb_used_record) + size;

	/* Need to hold a hash lock to expand DB: transactions rely on it. */
	if (!(tdb->flags & TDB_NOLOCK)
	    && !tdb->allrecord_lock.count && !tdb_has_hash_locks(tdb)) {
		tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
			   "tdb_expand: must hold lock during expand");
		return -1;
	}

	/* always make room for at least 100 more records, and at
	   least 25% more space. */
	if (size * TDB_EXTENSION_FACTOR > tdb->map_size / 4)
		wanted = size * TDB_EXTENSION_FACTOR;
	else
		wanted = tdb->map_size / 4;
	wanted = adjust_size(0, wanted);

	/* Only one person can expand file at a time. */
	ecode = tdb_lock_expand(tdb, F_WRLCK);
	if (ecode != TDB_SUCCESS) {
		tdb->ecode = ecode;
		return -1;
	}

	/* Someone else may have expanded the file, so retry.
	 * NOTE(review): the oob() return value is ignored here; it appears
	 * to be called only to refresh map_size — confirm. */
	old_size = tdb->map_size;
	tdb->methods->oob(tdb, tdb->map_size + 1, true);
	if (tdb->map_size != old_size) {
		tdb_unlock_expand(tdb, F_WRLCK);
		return 0;
	}

	ecode = tdb->methods->expand_file(tdb, wanted);
	if (ecode != TDB_SUCCESS) {
		tdb->ecode = ecode;
		tdb_unlock_expand(tdb, F_WRLCK);
		return -1;
	}

	/* We need to drop this lock before adding free record. */
	tdb_unlock_expand(tdb, F_WRLCK);

	add_stat(tdb, expands, 1);
	/* File the newly-created space on the free list. */
	return add_free_record(tdb, old_size, wanted);
}
698
699 /* This won't fail: it will expand the database if it has to. */
700 tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
701                 uint64_t hash, unsigned magic, bool growing)
702 {
703         tdb_off_t off;
704
705         /* We can't hold pointers during this: we could unmap! */
706         assert(!tdb->direct_access);
707
708         for (;;) {
709                 off = get_free(tdb, keylen, datalen, growing, magic, hash);
710                 if (likely(off != 0))
711                         break;
712
713                 if (tdb_expand(tdb, adjust_size(keylen, datalen)))
714                         return TDB_OFF_ERR;
715         }
716
717         return off;
718 }