/*
 * ccan/tdb2/free.c
 * (page-capture residue: "read_write_all: avoid arithmetic on void pointers.")
 */
1  /*
2    Trivial Database 2: free list/block handling
3    Copyright (C) Rusty Russell 2010
4
5    This library is free software; you can redistribute it and/or
6    modify it under the terms of the GNU Lesser General Public
7    License as published by the Free Software Foundation; either
8    version 3 of the License, or (at your option) any later version.
9
10    This library is distributed in the hope that it will be useful,
11    but WITHOUT ANY WARRANTY; without even the implied warranty of
12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13    Lesser General Public License for more details.
14
15    You should have received a copy of the GNU Lesser General Public
16    License along with this library; if not, see <http://www.gnu.org/licenses/>.
17 */
18 #include "private.h"
19 #include <ccan/likely/likely.h>
20 #include <ccan/ilog/ilog.h>
21 #include <time.h>
22 #include <assert.h>
23 #include <limits.h>
24
/* Index of the highest set bit in v: a thin alias for ilog64(). */
static unsigned fls64(uint64_t v)
{
        return ilog64(v);
}
29
30 /* In which bucket would we find a particular record size? (ignoring header) */
31 unsigned int size_to_bucket(tdb_len_t data_len)
32 {
33         unsigned int bucket;
34
35         /* We can't have records smaller than this. */
36         assert(data_len >= TDB_MIN_DATA_LEN);
37
38         /* Ignoring the header... */
39         if (data_len - TDB_MIN_DATA_LEN <= 64) {
40                 /* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
41                 bucket = (data_len - TDB_MIN_DATA_LEN) / 8;
42         } else {
43                 /* After that we go power of 2. */
44                 bucket = fls64(data_len - TDB_MIN_DATA_LEN) + 2;
45         }
46
47         if (unlikely(bucket >= TDB_FREE_BUCKETS))
48                 bucket = TDB_FREE_BUCKETS - 1;
49         return bucket;
50 }
51
52 tdb_off_t first_ftable(struct tdb_context *tdb)
53 {
54         return tdb_read_off(tdb, offsetof(struct tdb_header, free_table));
55 }
56
57 tdb_off_t next_ftable(struct tdb_context *tdb, tdb_off_t ftable)
58 {
59         return tdb_read_off(tdb, ftable + offsetof(struct tdb_freetable,next));
60 }
61
/* Choose which free table this context will allocate from, at random.
 *
 * Walks the whole chain of free tables, keeping a running "winner" so each
 * table has a chance of being picked; this spreads lock contention between
 * concurrent users.  On success, tdb->ftable_off / tdb->ftable identify
 * the chosen table.
 *
 * Returns TDB_SUCCESS, or a negative error propagated from the off-read
 * (errors are encoded in the returned offset, per TDB_OFF_IS_ERR).
 */
enum TDB_ERROR tdb_ftable_init(struct tdb_context *tdb)
{
        /* Use reservoir sampling algorithm to select a free list at random. */
        unsigned int rnd, max = 0, count = 0;
        tdb_off_t off;

        tdb->ftable_off = off = first_ftable(tdb);
        tdb->ftable = 0;

        while (off) {
                /* first_ftable/next_ftable encode errors in the offset. */
                if (TDB_OFF_IS_ERR(off)) {
                        return off;
                }

                rnd = random();
                /* NOTE(review): ">=" means the first table always passes the
                 * max==0 comparison and ties favour later tables, so the
                 * choice is only approximately uniform — by design, since we
                 * only need to spread contention, not be exactly fair. */
                if (rnd >= max) {
                        tdb->ftable_off = off;
                        tdb->ftable = count;
                        max = rnd;
                }

                off = next_ftable(tdb, off);
                count++;
        }
        return TDB_SUCCESS;
}
88
89 /* Offset of a given bucket. */
90 tdb_off_t bucket_off(tdb_off_t ftable_off, unsigned bucket)
91 {
92         return ftable_off + offsetof(struct tdb_freetable, buckets)
93                 + bucket * sizeof(tdb_off_t);
94 }
95
96 /* Returns free_buckets + 1, or list number to search, or -ve error. */
97 static tdb_off_t find_free_head(struct tdb_context *tdb,
98                                 tdb_off_t ftable_off,
99                                 tdb_off_t bucket)
100 {
101         /* Speculatively search for a non-zero bucket. */
102         return tdb_find_nonzero_off(tdb, bucket_off(ftable_off, 0),
103                                     bucket, TDB_FREE_BUCKETS);
104 }
105
106 /* Remove from free bucket. */
107 static enum TDB_ERROR remove_from_list(struct tdb_context *tdb,
108                                        tdb_off_t b_off, tdb_off_t r_off,
109                                        const struct tdb_free_record *r)
110 {
111         tdb_off_t off;
112         enum TDB_ERROR ecode;
113
114         /* Front of list? */
115         if (frec_prev(r) == 0) {
116                 off = b_off;
117         } else {
118                 off = frec_prev(r) + offsetof(struct tdb_free_record, next);
119         }
120
121 #ifdef CCAN_TDB2_DEBUG
122         if (tdb_read_off(tdb, off) != r_off) {
123                 return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
124                                   "remove_from_list:"
125                                   " %llu bad prev in list %llu",
126                                   (long long)r_off, (long long)b_off);
127         }
128 #endif
129
130         /* r->prev->next = r->next */
131         ecode = tdb_write_off(tdb, off, r->next);
132         if (ecode != TDB_SUCCESS) {
133                 return ecode;
134         }
135
136         if (r->next != 0) {
137                 off = r->next + offsetof(struct tdb_free_record,magic_and_prev);
138                 /* r->next->prev = r->prev */
139
140 #ifdef CCAN_TDB2_DEBUG
141                 if (tdb_read_off(tdb, off) & TDB_OFF_MASK != r_off) {
142                         return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
143                                           "remove_from_list:"
144                                           " %llu bad list %llu",
145                                           (long long)r_off, (long long)b_off);
146                 }
147 #endif
148
149                 ecode = tdb_write_off(tdb, off, r->magic_and_prev);
150                 if (ecode != TDB_SUCCESS) {
151                         return ecode;
152                 }
153         }
154         return TDB_SUCCESS;
155 }
156
157 /* Enqueue in this free bucket. */
/* Push the free record at 'off' (data length 'len') onto the front of the
 * bucket list whose head word is at b_off.
 *
 * The caller must hold the bucket lock.  Write order matters: the old
 * head's prev pointer and the bucket head are updated before the new
 * record itself is written out.  Returns TDB_SUCCESS or a TDB_ERR_* code.
 */
static enum TDB_ERROR enqueue_in_free(struct tdb_context *tdb,
                                      tdb_off_t b_off,
                                      tdb_off_t off,
                                      tdb_len_t len)
{
        struct tdb_free_record new;
        enum TDB_ERROR ecode;
        /* TDB_FREE_MAGIC in the top bits marks a free record's prev field. */
        uint64_t magic = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL));

        /* Build the complete record locally; it is written out at the end. */
        new.ftable_and_len = ((uint64_t)tdb->ftable << (64 - TDB_OFF_UPPER_STEAL))
                | len;
        /* prev = 0. */
        new.magic_and_prev = magic;

        /* new->next = head. */
        new.next = tdb_read_off(tdb, b_off);
        if (TDB_OFF_IS_ERR(new.next)) {
                return new.next;
        }

        if (new.next) {
#ifdef CCAN_TDB2_DEBUG
                /* The old head should have prev == 0 (i.e. bare magic). */
                if (tdb_read_off(tdb,
                                 new.next + offsetof(struct tdb_free_record,
                                                     magic_and_prev))
                    != magic) {
                        return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
                                          "enqueue_in_free: %llu bad head"
                                          " prev %llu",
                                          (long long)new.next,
                                          (long long)b_off);
                }
#endif
                /* next->prev = new. */
                ecode = tdb_write_off(tdb, new.next
                                      + offsetof(struct tdb_free_record,
                                                 magic_and_prev),
                                      off | magic);
                if (ecode != TDB_SUCCESS) {
                        return ecode;
                }
        }
        /* head = new */
        ecode = tdb_write_off(tdb, b_off, off);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        /* Finally write the record body itself (endian-converted). */
        return tdb_write_convert(tdb, off, &new, sizeof(new));
}
209
210 /* List need not be locked. */
/* Return a region of the file to the free lists.
 *
 * 'off' is the start of the region, 'len_with_header' its total size
 * including the used-record header.  Picks the appropriate bucket in the
 * current free table, locks it, enqueues, and unlocks.  The list need not
 * be locked by the caller.  Returns TDB_SUCCESS or a TDB_ERR_* code.
 */
enum TDB_ERROR add_free_record(struct tdb_context *tdb,
                               tdb_off_t off, tdb_len_t len_with_header)
{
        tdb_off_t b_off;
        tdb_len_t len;
        enum TDB_ERROR ecode;

        /* Region must be big enough to hold a free-record header. */
        assert(len_with_header >= sizeof(struct tdb_free_record));

        /* Bucket sizing is based on the data length, header excluded. */
        len = len_with_header - sizeof(struct tdb_used_record);

        b_off = bucket_off(tdb->ftable_off, size_to_bucket(len));
        ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        /* Unlock unconditionally; enqueue_in_free's result is what we report. */
        ecode = enqueue_in_free(tdb, b_off, off, len);
        tdb_unlock_free_bucket(tdb, b_off);
        return ecode;
}
232
233 static size_t adjust_size(size_t keylen, size_t datalen)
234 {
235         size_t size = keylen + datalen;
236
237         if (size < TDB_MIN_DATA_LEN)
238                 size = TDB_MIN_DATA_LEN;
239
240         /* Round to next uint64_t boundary. */
241         return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
242 }
243
244 /* If we have enough left over to be useful, split that off. */
245 static size_t record_leftover(size_t keylen, size_t datalen,
246                               bool want_extra, size_t total_len)
247 {
248         ssize_t leftover;
249
250         if (want_extra)
251                 datalen += datalen / 2;
252         leftover = total_len - adjust_size(keylen, datalen);
253
254         if (leftover < (ssize_t)sizeof(struct tdb_free_record))
255                 return 0;
256
257         return leftover;
258 }
259
260 static tdb_off_t ftable_offset(struct tdb_context *tdb, unsigned int ftable)
261 {
262         tdb_off_t off;
263         unsigned int i;
264
265         if (likely(tdb->ftable == ftable))
266                 return tdb->ftable_off;
267
268         off = first_ftable(tdb);
269         for (i = 0; i < ftable; i++) {
270                 if (TDB_OFF_IS_ERR(off)) {
271                         break;
272                 }
273                 off = next_ftable(tdb, off);
274         }
275         return off;
276 }
277
278 /* Note: we unlock the current bucket if we coalesce or fail. */
/* Try to merge the free record at 'off' (data length data_len, in the
 * bucket locked at b_off) with any free records physically following it.
 *
 * Returns true if a merge happened (in which case the bucket lock at
 * b_off has been DROPPED and the caller must restart), false if nothing
 * adjacent was free (lock still held), or a negative error (lock
 * dropped).  Note: we unlock the current bucket if we coalesce or fail.
 */
static tdb_bool_err coalesce(struct tdb_context *tdb,
                             tdb_off_t off, tdb_off_t b_off,
                             tdb_len_t data_len)
{
        tdb_off_t end;
        struct tdb_free_record rec;
        enum TDB_ERROR ecode;

        add_stat(tdb, alloc_coalesce_tried, 1);
        /* 'end' tracks the first byte past the (growing) merged region. */
        end = off + sizeof(struct tdb_used_record) + data_len;

        while (end < tdb->file->map_size) {
                const struct tdb_free_record *r;
                tdb_off_t nb_off;
                unsigned ftable, bucket;

                /* Peek at the neighbour record (unlocked, so re-checked
                 * below once we hold its bucket lock). */
                r = tdb_access_read(tdb, end, sizeof(*r), true);
                if (TDB_PTR_IS_ERR(r)) {
                        ecode = TDB_PTR_ERR(r);
                        goto err;
                }

                /* Not free, or deliberately marked un-coalescable? Stop. */
                if (frec_magic(r) != TDB_FREE_MAGIC
                    || frec_ftable(r) == TDB_FTABLE_NONE) {
                        tdb_access_release(tdb, r);
                        break;
                }

                ftable = frec_ftable(r);
                bucket = size_to_bucket(frec_len(r));
                nb_off = ftable_offset(tdb, ftable);
                if (TDB_OFF_IS_ERR(nb_off)) {
                        tdb_access_release(tdb, r);
                        ecode = nb_off;
                        goto err;
                }
                nb_off = bucket_off(nb_off, bucket);
                tdb_access_release(tdb, r);

                /* We may be violating lock order here, so best effort. */
                if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT)
                    != TDB_SUCCESS) {
                        add_stat(tdb, alloc_coalesce_lockfail, 1);
                        break;
                }

                /* Now we have lock, re-check. */
                ecode = tdb_read_convert(tdb, end, &rec, sizeof(rec));
                if (ecode != TDB_SUCCESS) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        goto err;
                }

                /* Someone may have allocated it in the window. */
                if (unlikely(frec_magic(&rec) != TDB_FREE_MAGIC)) {
                        add_stat(tdb, alloc_coalesce_race, 1);
                        tdb_unlock_free_bucket(tdb, nb_off);
                        break;
                }

                /* ...or it may have moved to a different table/bucket. */
                if (unlikely(frec_ftable(&rec) != ftable)
                    || unlikely(size_to_bucket(frec_len(&rec)) != bucket)) {
                        add_stat(tdb, alloc_coalesce_race, 1);
                        tdb_unlock_free_bucket(tdb, nb_off);
                        break;
                }

                ecode = remove_from_list(tdb, nb_off, end, &rec);
                if (ecode != TDB_SUCCESS) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        goto err;
                }

                /* Absorb the neighbour and keep scanning. */
                end += sizeof(struct tdb_used_record) + frec_len(&rec);
                tdb_unlock_free_bucket(tdb, nb_off);
                add_stat(tdb, alloc_coalesce_num_merged, 1);
        }

        /* Didn't find any adjacent free? */
        if (end == off + sizeof(struct tdb_used_record) + data_len)
                return false;

        /* OK, expand initial record */
        ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
        if (ecode != TDB_SUCCESS) {
                goto err;
        }

        if (frec_len(&rec) != data_len) {
                ecode = tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
                                   "coalesce: expected data len %zu not %zu",
                                   (size_t)data_len, (size_t)frec_len(&rec));
                goto err;
        }

        ecode = remove_from_list(tdb, b_off, off, &rec);
        if (ecode != TDB_SUCCESS) {
                goto err;
        }

        /* We have to drop this to avoid deadlocks, so make sure record
         * doesn't get coalesced by someone else! */
        rec.ftable_and_len = (TDB_FTABLE_NONE << (64 - TDB_OFF_UPPER_STEAL))
                | (end - off - sizeof(struct tdb_used_record));
        ecode = tdb_write_off(tdb, off + offsetof(struct tdb_free_record,
                                                  ftable_and_len),
                              rec.ftable_and_len);
        if (ecode != TDB_SUCCESS) {
                goto err;
        }

        add_stat(tdb, alloc_coalesce_succeeded, 1);
        tdb_unlock_free_bucket(tdb, b_off);

        /* Re-enqueue the enlarged region (takes its own lock). */
        ecode = add_free_record(tdb, off, end - off);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }
        return true;

err:
        /* To unify error paths, we *always* unlock bucket on error. */
        tdb_unlock_free_bucket(tdb, b_off);
        return ecode;
}
403
404 /* We need size bytes to put our key and data in. */
/* Lock one bucket of a free table and try to allocate a record of
 * adjust_size(keylen, datalen) bytes from it.
 *
 * Walks the bucket's list looking for the best (smallest sufficient) fit,
 * growing fussier tolerance as it goes; opportunistically coalesces
 * records it passes over (which forces a restart).  On success the chosen
 * record is marked used (header written) and any worthwhile tail is split
 * off back to the free lists.
 *
 * Returns the offset of the allocated record, 0 if the bucket had nothing
 * suitable, or a negative error.  The bucket lock is always released
 * before returning.
 */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
                                tdb_off_t ftable_off,
                                tdb_off_t bucket,
                                size_t keylen, size_t datalen,
                                bool want_extra,
                                unsigned magic,
                                unsigned hashlow)
{
        tdb_off_t off, b_off,best_off;
        struct tdb_free_record best = { 0 };
        double multiplier;
        size_t size = adjust_size(keylen, datalen);
        enum TDB_ERROR ecode;

        add_stat(tdb, allocs, 1);
again:
        b_off = bucket_off(ftable_off, bucket);

        /* FIXME: Try non-blocking wait first, to measure contention. */
        /* Lock this bucket. */
        ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        /* -1ULL length sentinel: any real record beats "no best yet". */
        best.ftable_and_len = -1ULL;
        best_off = 0;

        /* Get slack if we're after extra. */
        if (want_extra)
                multiplier = 1.5;
        else
                multiplier = 1.0;

        /* Walk the list to see if any are large enough, getting less fussy
         * as we go. */
        off = tdb_read_off(tdb, b_off);
        if (TDB_OFF_IS_ERR(off)) {
                ecode = off;
                goto unlock_err;
        }

        while (off) {
                const struct tdb_free_record *r;
                tdb_len_t len;
                tdb_off_t next;
                int coal;

                r = tdb_access_read(tdb, off, sizeof(*r), true);
                if (TDB_PTR_IS_ERR(r)) {
                        ecode = TDB_PTR_ERR(r);
                        goto unlock_err;
                }

                /* A non-free record in a free list means corruption. */
                if (frec_magic(r) != TDB_FREE_MAGIC) {
                        tdb_access_release(tdb, r);
                        ecode = tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
                                           "lock_and_alloc:"
                                           " %llu non-free 0x%llx",
                                           (long long)off,
                                           (long long)r->magic_and_prev);
                        goto unlock_err;
                }

                /* Keep the smallest record that still fits. */
                if (frec_len(r) >= size && frec_len(r) < frec_len(&best)) {
                        best_off = off;
                        best = *r;
                }

                /* Good enough for the current (decaying) fussiness? Stop. */
                if (frec_len(&best) < size * multiplier && best_off) {
                        tdb_access_release(tdb, r);
                        break;
                }

                /* Each step we tolerate 1% more slack. */
                multiplier *= 1.01;

                next = r->next;
                len = frec_len(r);
                tdb_access_release(tdb, r);

                /* Since we're going slow anyway, try coalescing here. */
                coal = coalesce(tdb, off, b_off, len);
                if (coal == 1) {
                        /* This has unlocked list, restart. */
                        goto again;
                }
                if (coal < 0) {
                        /* This has already unlocked on error. */
                        return coal;
                }
                off = next;
        }

        /* If we found anything at all, use it. */
        if (best_off) {
                struct tdb_used_record rec;
                size_t leftover;

                /* We're happy with this size: take it. */
                ecode = remove_from_list(tdb, b_off, best_off, &best);
                if (ecode != TDB_SUCCESS) {
                        goto unlock_err;
                }

                leftover = record_leftover(keylen, datalen, want_extra,
                                           frec_len(&best));

                assert(keylen + datalen + leftover <= frec_len(&best));
                /* We need to mark non-free before we drop lock, otherwise
                 * coalesce() could try to merge it! */
                ecode = set_header(tdb, &rec, magic, keylen, datalen,
                                   frec_len(&best) - leftover, hashlow);
                if (ecode != TDB_SUCCESS) {
                        goto unlock_err;
                }

                ecode = tdb_write_convert(tdb, best_off, &rec, sizeof(rec));
                if (ecode != TDB_SUCCESS) {
                        goto unlock_err;
                }

                /* For futureproofing, we put a 0 in any unused space. */
                if (rec_extra_padding(&rec)) {
                        ecode = tdb->methods->twrite(tdb, best_off + sizeof(rec)
                                                     + keylen + datalen, "", 1);
                        if (ecode != TDB_SUCCESS) {
                                goto unlock_err;
                        }
                }

                /* Bucket of leftover will be <= current bucket, so nested
                 * locking is allowed. */
                if (leftover) {
                        add_stat(tdb, alloc_leftover, 1);
                        ecode = add_free_record(tdb,
                                                best_off + sizeof(rec)
                                                + frec_len(&best) - leftover,
                                                leftover);
                        if (ecode != TDB_SUCCESS) {
                                /* Error is returned via the offset. */
                                best_off = ecode;
                        }
                }
                tdb_unlock_free_bucket(tdb, b_off);

                return best_off;
        }

        tdb_unlock_free_bucket(tdb, b_off);
        return 0;

unlock_err:
        tdb_unlock_free_bucket(tdb, b_off);
        return ecode;
}
559
560 /* Get a free block from current free list, or 0 if none, -ve on error. */
/* Get a free block from current free list, or 0 if none, -ve on error.
 *
 * Starts at the bucket matching the requested size (one bucket up, via
 * +50% data, if the caller is growing) in the context's current free
 * table, searching larger buckets, then subsequent tables, wrapping back
 * to the first table until the whole chain has been visited once.  On
 * success, the context sticks with the table that satisfied the request.
 */
static tdb_off_t get_free(struct tdb_context *tdb,
                          size_t keylen, size_t datalen, bool want_extra,
                          unsigned magic, unsigned hashlow)
{
        tdb_off_t off, ftable_off;
        tdb_off_t start_b, b, ftable;
        bool wrapped = false;

        /* If they are growing, add 50% to get to higher bucket. */
        if (want_extra)
                start_b = size_to_bucket(adjust_size(keylen,
                                                     datalen + datalen / 2));
        else
                start_b = size_to_bucket(adjust_size(keylen, datalen));

        ftable_off = tdb->ftable_off;
        ftable = tdb->ftable;
        /* Loop terminates once we've wrapped and come back to our table. */
        while (!wrapped || ftable_off != tdb->ftable_off) {
                /* Start at exact size bucket, and search up... */
                for (b = find_free_head(tdb, ftable_off, start_b);
                     b < TDB_FREE_BUCKETS;
                     b = find_free_head(tdb, ftable_off, b + 1)) {
                        /* Try getting one from list. */
                        off = lock_and_alloc(tdb, ftable_off,
                                             b, keylen, datalen, want_extra,
                                             magic, hashlow);
                        if (TDB_OFF_IS_ERR(off))
                                return off;
                        if (off != 0) {
                                if (b == start_b)
                                        add_stat(tdb, alloc_bucket_exact, 1);
                                if (b == TDB_FREE_BUCKETS - 1)
                                        add_stat(tdb, alloc_bucket_max, 1);
                                /* Worked?  Stay using this list. */
                                tdb->ftable_off = ftable_off;
                                tdb->ftable = ftable;
                                return off;
                        }
                        /* Didn't work.  Try next bucket. */
                }

                /* find_free_head may have returned an error-encoded value. */
                if (TDB_OFF_IS_ERR(b)) {
                        return b;
                }

                /* Hmm, try next table. */
                ftable_off = next_ftable(tdb, ftable_off);
                if (TDB_OFF_IS_ERR(ftable_off)) {
                        return ftable_off;
                }
                ftable++;

                /* End of chain: wrap around to the first table. */
                if (ftable_off == 0) {
                        wrapped = true;
                        ftable_off = first_ftable(tdb);
                        if (TDB_OFF_IS_ERR(ftable_off)) {
                                return ftable_off;
                        }
                        ftable = 0;
                }
        }

        return 0;
}
625
/* Encode a used-record header in *rec.
 *
 * Packs the low hash bits, extra padding (actuallen beyond keylen+datalen),
 * the key-length bit count, and the magic into magic_and_meta, and the
 * key/data lengths into key_and_data_len.  The encoding has limited field
 * widths, so it is decoded back and verified; on overflow, logs and
 * returns TDB_ERR_IO.
 */
enum TDB_ERROR set_header(struct tdb_context *tdb,
                          struct tdb_used_record *rec,
                          unsigned magic, uint64_t keylen, uint64_t datalen,
                          uint64_t actuallen, unsigned hashlow)
{
        /* Number of bit-pairs needed to hold keylen (datalen is shifted
         * past the key field by keybits*2 bits below). */
        uint64_t keybits = (fls64(keylen) + 1) / 2;

        /* Use bottom bits of hash, so it's independent of hash table size. */
        rec->magic_and_meta = (hashlow & ((1 << 11)-1))
                | ((actuallen - (keylen + datalen)) << 11)
                | (keybits << 43)
                | ((uint64_t)magic << 48);
        rec->key_and_data_len = (keylen | (datalen << (keybits*2)));

        /* Encoding can fail on big values. */
        if (rec_key_length(rec) != keylen
            || rec_data_length(rec) != datalen
            || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
                return tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
                                  "Could not encode k=%llu,d=%llu,a=%llu",
                                  (long long)keylen, (long long)datalen,
                                  (long long)actuallen);
        }
        return TDB_SUCCESS;
}
651
652 /* Expand the database. */
/* Expand the database file to make room for at least 'size' bytes of
 * key+data (plus a record header).
 *
 * Requires the caller to hold a hash lock (or allrecord lock) unless
 * TDB_NOLOCK; grows by at least 25% of the current map or
 * TDB_EXTENSION_FACTOR times the request, whichever is larger, and adds
 * the new space to the free lists.  Returns TDB_SUCCESS or a TDB_ERR_*.
 */
static enum TDB_ERROR tdb_expand(struct tdb_context *tdb, tdb_len_t size)
{
        uint64_t old_size;
        tdb_len_t wanted;
        enum TDB_ERROR ecode;

        /* We need room for the record header too. */
        wanted = sizeof(struct tdb_used_record) + size;

        /* Need to hold a hash lock to expand DB: transactions rely on it. */
        if (!(tdb->flags & TDB_NOLOCK)
            && !tdb->file->allrecord_lock.count && !tdb_has_hash_locks(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_expand: must hold lock during expand");
        }

        /* always make room for at least 100 more records, and at
           least 25% more space. */
        if (size * TDB_EXTENSION_FACTOR > tdb->file->map_size / 4)
                wanted = size * TDB_EXTENSION_FACTOR;
        else
                wanted = tdb->file->map_size / 4;
        wanted = adjust_size(0, wanted);

        /* Only one person can expand file at a time. */
        ecode = tdb_lock_expand(tdb, F_WRLCK);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        /* Someone else may have expanded the file, so retry. */
        old_size = tdb->file->map_size;
        /* oob() remaps; if the map grew, another process already expanded. */
        tdb->methods->oob(tdb, tdb->file->map_size + 1, true);
        if (tdb->file->map_size != old_size) {
                tdb_unlock_expand(tdb, F_WRLCK);
                return TDB_SUCCESS;
        }

        ecode = tdb->methods->expand_file(tdb, wanted);
        if (ecode != TDB_SUCCESS) {
                tdb_unlock_expand(tdb, F_WRLCK);
                return ecode;
        }

        /* We need to drop this lock before adding free record. */
        tdb_unlock_expand(tdb, F_WRLCK);

        add_stat(tdb, expands, 1);
        /* The newly-appended region becomes one big free record. */
        return add_free_record(tdb, old_size, wanted);
}
703
704 /* This won't fail: it will expand the database if it has to. */
705 tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
706                 uint64_t hash, unsigned magic, bool growing)
707 {
708         tdb_off_t off;
709
710         /* We can't hold pointers during this: we could unmap! */
711         assert(!tdb->direct_access);
712
713         for (;;) {
714                 enum TDB_ERROR ecode;
715                 off = get_free(tdb, keylen, datalen, growing, magic, hash);
716                 if (likely(off != 0))
717                         break;
718
719                 ecode = tdb_expand(tdb, adjust_size(keylen, datalen));
720                 if (ecode != TDB_SUCCESS) {
721                         return ecode;
722                 }
723         }
724
725         return off;
726 }