/*
  Trivial Database 2: free list/block handling
  Copyright (C) Rusty Russell 2010

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 3 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <ccan/likely/likely.h>
#include <ccan/ilog/ilog.h>
#include <time.h>
#include <assert.h>
#include <limits.h>

static unsigned fls64(uint64_t val)
{
        return ilog64(val);
}

/* In which bucket would we find a particular record size? (ignoring header) */
unsigned int size_to_bucket(tdb_len_t data_len)
{
        unsigned int bucket;

        /* We can't have records smaller than this. */
        assert(data_len >= TDB_MIN_DATA_LEN);

        /* Ignoring the header... */
        if (data_len - TDB_MIN_DATA_LEN <= 64) {
                /* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
                bucket = (data_len - TDB_MIN_DATA_LEN) / 8;
        } else {
                /* After that we go power of 2. */
                bucket = fls64(data_len - TDB_MIN_DATA_LEN) + 2;
        }

        if (unlikely(bucket >= TDB_FREE_BUCKETS))
                bucket = TDB_FREE_BUCKETS - 1;
        return bucket;
}
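
/*
 * Illustration: how data lengths map to buckets.  The first nine buckets
 * cover the first 64 bytes above the minimum in 8-byte steps; past that
 * the bucket index grows logarithmically.  MIN and NBUCKETS below are
 * hypothetical stand-ins for TDB_MIN_DATA_LEN and TDB_FREE_BUCKETS (the
 * real values live in private.h); the sketch compiles standalone:
 *
 *   #include <stdio.h>
 *   #include <stdint.h>
 *
 *   #define MIN 8
 *   #define NBUCKETS 32
 *
 *   static unsigned naive_fls64(uint64_t v)
 *   {
 *           unsigned b = 0;
 *           while (v) {
 *                   v >>= 1;
 *                   b++;
 *           }
 *           return b;   // 1-based index of highest set bit, like ilog64()
 *   }
 *
 *   static unsigned bucket(uint64_t len)
 *   {
 *           uint64_t excess = len - MIN;
 *           unsigned b = excess <= 64 ? excess / 8 : naive_fls64(excess) + 2;
 *           return b >= NBUCKETS ? NBUCKETS - 1 : b;
 *   }
 *
 *   int main(void)
 *   {
 *           // excess 0 -> 0, 32 -> 4, 64 -> 8, 96 -> 9, 128 -> 10 ...
 *           for (uint64_t d = MIN; d < MIN + 512; d += 32)
 *                   printf("len %llu -> bucket %u\n",
 *                          (unsigned long long)d, bucket(d));
 *           return 0;
 *   }
 */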

tdb_off_t first_ftable(struct tdb_context *tdb)
{
        return tdb_read_off(tdb, offsetof(struct tdb_header, free_table));
}

tdb_off_t next_ftable(struct tdb_context *tdb, tdb_off_t ftable)
{
        return tdb_read_off(tdb, ftable + offsetof(struct tdb_freetable, next));
}

enum TDB_ERROR tdb_ftable_init(struct tdb_context *tdb)
{
        /* Use reservoir sampling algorithm to select a free list at random. */
        unsigned int rnd, max = 0, count = 0;
        tdb_off_t off;

        tdb->ftable_off = off = first_ftable(tdb);
        tdb->ftable = 0;

        while (off) {
                if (TDB_OFF_IS_ERR(off)) {
                        return off;
                }

                rnd = random();
                if (rnd >= max) {
                        tdb->ftable_off = off;
                        tdb->ftable = count;
                        max = rnd;
                }

                off = next_ftable(tdb, off);
                count++;
        }
        return TDB_SUCCESS;
}
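
/*
 * Illustration: the loop above is the "keep the element with the largest
 * random key" flavour of reservoir sampling: one pass, no need to know
 * the list length, and each element wins with roughly equal probability.
 * A standalone model over a plain array (the array and names are made up):
 *
 *   #include <stdio.h>
 *   #include <stdlib.h>
 *
 *   int main(void)
 *   {
 *           int items[] = { 10, 20, 30, 40 };
 *           int chosen = -1;
 *           long best = -1;
 *
 *           srandom(42);
 *           for (unsigned i = 0; i < 4; i++) {
 *                   long key = random();    // one random key per element
 *                   if (key >= best) {      // keep the largest key so far
 *                           best = key;
 *                           chosen = items[i];
 *                   }
 *           }
 *           printf("chosen %d\n", chosen);
 *           return 0;
 *   }
 */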

/* Offset of a given bucket. */
tdb_off_t bucket_off(tdb_off_t ftable_off, unsigned bucket)
{
        return ftable_off + offsetof(struct tdb_freetable, buckets)
                + bucket * sizeof(tdb_off_t);
}

/* Returns free_buckets + 1, or list number to search, or -ve error. */
static tdb_off_t find_free_head(struct tdb_context *tdb,
                                tdb_off_t ftable_off,
                                tdb_off_t bucket)
{
        /* Speculatively search for a non-zero bucket. */
        return tdb_find_nonzero_off(tdb, bucket_off(ftable_off, 0),
                                    bucket, TDB_FREE_BUCKETS);
}

static void check_list(struct tdb_context *tdb, tdb_off_t b_off)
{
#ifdef CCAN_TDB2_DEBUG
        tdb_off_t off, prev = 0, first;
        struct tdb_free_record r;

        first = off = tdb_read_off(tdb, b_off);
        while (off != 0) {
                tdb_read_convert(tdb, off, &r, sizeof(r));
                if (frec_magic(&r) != TDB_FREE_MAGIC)
                        abort();
                if (prev && frec_prev(&r) != prev)
                        abort();
                prev = off;
                off = r.next;
        }

        if (first) {
                tdb_read_convert(tdb, first, &r, sizeof(r));
                if (frec_prev(&r) != prev)
                        abort();
        }
#endif
}

/* Remove from free bucket. */
static enum TDB_ERROR remove_from_list(struct tdb_context *tdb,
                                       tdb_off_t b_off, tdb_off_t r_off,
                                       const struct tdb_free_record *r)
{
        tdb_off_t off, prev_next, head;
        enum TDB_ERROR ecode;

        /* Is this the only element in the list?  Zero the bucket: done. */
        if (frec_prev(r) == r_off)
                return tdb_write_off(tdb, b_off, 0);

        /* off = &r->prev->next */
        off = frec_prev(r) + offsetof(struct tdb_free_record, next);

        /* Get prev->next */
        prev_next = tdb_read_off(tdb, off);
        if (TDB_OFF_IS_ERR(prev_next))
                return prev_next;

        /* If prev->next == 0, we were head: update bucket to point to next. */
        if (prev_next == 0) {
#ifdef CCAN_TDB2_DEBUG
                if (tdb_read_off(tdb, b_off) != r_off) {
                        return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
                                          "remove_from_list:"
                                          " %llu head %llu on list %llu",
                                          (long long)r_off,
                                          (long long)tdb_read_off(tdb, b_off),
                                          (long long)b_off);
                }
#endif
                ecode = tdb_write_off(tdb, b_off, r->next);
                if (ecode != TDB_SUCCESS)
                        return ecode;
        } else {
                /* r->prev->next = r->next */
                ecode = tdb_write_off(tdb, off, r->next);
                if (ecode != TDB_SUCCESS)
                        return ecode;
        }

        /* If we were the tail, off = &head->prev. */
        if (r->next == 0) {
                head = tdb_read_off(tdb, b_off);
                if (TDB_OFF_IS_ERR(head))
                        return head;
                off = head + offsetof(struct tdb_free_record, magic_and_prev);
        } else {
                /* off = &r->next->prev */
                off = r->next + offsetof(struct tdb_free_record,
                                         magic_and_prev);
        }

#ifdef CCAN_TDB2_DEBUG
        /* *off == r */
        if ((tdb_read_off(tdb, off) & TDB_OFF_MASK) != r_off) {
                return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
                                  "remove_from_list:"
                                  " %llu bad prev in list %llu",
                                  (long long)r_off, (long long)b_off);
        }
#endif
        /* r->next->prev = r->prev */
        return tdb_write_off(tdb, off, r->magic_and_prev);
}
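
/*
 * Illustration: magic_and_prev packs a tag into the top bits of the prev
 * offset, which is why the check above masks with TDB_OFF_MASK.  STEAL,
 * MAGIC and OFF_MASK below are hypothetical stand-ins for
 * TDB_OFF_UPPER_STEAL, TDB_FREE_MAGIC and TDB_OFF_MASK; the encode and
 * decode mirror the frec_prev()/frec_magic() split:
 *
 *   #include <assert.h>
 *   #include <stdint.h>
 *
 *   #define STEAL 4
 *   #define MAGIC 0xFULL
 *   #define OFF_MASK ((1ULL << (64 - STEAL)) - 1)
 *
 *   int main(void)
 *   {
 *           uint64_t prev = 0x1234;
 *           uint64_t word = (MAGIC << (64 - STEAL)) | prev;
 *
 *           assert((word & OFF_MASK) == prev);         // cf. frec_prev()
 *           assert((word >> (64 - STEAL)) == MAGIC);   // cf. frec_magic()
 *           return 0;
 *   }
 */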

/* Enqueue in this free bucket. */
static enum TDB_ERROR enqueue_in_free(struct tdb_context *tdb,
                                      tdb_off_t b_off,
                                      tdb_off_t off,
                                      tdb_len_t len)
{
        struct tdb_free_record new;
        enum TDB_ERROR ecode;
        tdb_off_t prev;
        uint64_t magic = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL));
        /* Set ftable_and_len now; next and magic_and_prev are set below. */
        new.ftable_and_len = ((uint64_t)tdb->ftable << (64 - TDB_OFF_UPPER_STEAL))
                | len;

        /* new->next = head. */
        new.next = tdb_read_off(tdb, b_off);
        if (TDB_OFF_IS_ERR(new.next)) {
                return new.next;
        }

        /* First element?  Prev points to ourselves. */
        if (!new.next) {
                new.magic_and_prev = (magic | off);
        } else {
                /* new->prev = next->prev */
                prev = tdb_read_off(tdb,
                                    new.next + offsetof(struct tdb_free_record,
                                                        magic_and_prev));
                new.magic_and_prev = prev;
                if (frec_magic(&new) != TDB_FREE_MAGIC) {
                        return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
                                          "enqueue_in_free: %llu bad head"
                                          " prev %llu",
                                          (long long)new.next,
                                          (long long)prev);
                }
                /* next->prev = new. */
                ecode = tdb_write_off(tdb, new.next
                                      + offsetof(struct tdb_free_record,
                                                 magic_and_prev),
                                      off | magic);
                if (ecode != TDB_SUCCESS) {
                        return ecode;
                }

#ifdef CCAN_TDB2_DEBUG
                prev = tdb_read_off(tdb, frec_prev(&new)
                                    + offsetof(struct tdb_free_record, next));
                if (prev != 0) {
                        return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
                                          "enqueue_in_free:"
                                          " %llu bad tail next ptr %llu",
                                          (long long)frec_prev(&new)
                                          + offsetof(struct tdb_free_record,
                                                     next),
                                          (long long)prev);
                }
#endif
        }
        /* head = new */
        ecode = tdb_write_off(tdb, b_off, off);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        return tdb_write_convert(tdb, off, &new, sizeof(new));
}
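
/*
 * Illustration: the bucket list is singly linked through ->next
 * (tail->next == 0) but the prev pointers are circular: the head's prev
 * points at the tail.  That is what makes remove_from_list()'s
 * single-element test (prev == self) and its tail handling work.  A
 * standalone in-memory model (struct node and push() are made up):
 *
 *   #include <assert.h>
 *   #include <stddef.h>
 *
 *   struct node { struct node *next, *prev; };
 *
 *   static void push(struct node **head, struct node *n)
 *   {
 *           n->next = *head;
 *           if (!n->next) {
 *                   n->prev = n;              // only element: self
 *           } else {
 *                   n->prev = n->next->prev;  // old head's prev is the tail
 *                   n->next->prev = n;
 *           }
 *           *head = n;
 *   }
 *
 *   int main(void)
 *   {
 *           struct node a, b;
 *           struct node *head = NULL;
 *
 *           push(&head, &a);
 *           assert(a.prev == &a);             // one element: prev is self
 *           push(&head, &b);
 *           assert(head == &b);
 *           assert(b.prev == &a);             // head's prev is the tail
 *           assert(a.prev == &b);
 *           assert(a.next == NULL);           // but tail's next stays 0
 *           return 0;
 *   }
 */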

/* List need not be locked. */
enum TDB_ERROR add_free_record(struct tdb_context *tdb,
                               tdb_off_t off, tdb_len_t len_with_header,
                               enum tdb_lock_flags waitflag)
{
        tdb_off_t b_off;
        tdb_len_t len;
        enum TDB_ERROR ecode;

        assert(len_with_header >= sizeof(struct tdb_free_record));

        len = len_with_header - sizeof(struct tdb_used_record);

        b_off = bucket_off(tdb->ftable_off, size_to_bucket(len));
        ecode = tdb_lock_free_bucket(tdb, b_off, waitflag);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        ecode = enqueue_in_free(tdb, b_off, off, len);
        check_list(tdb, b_off);
        tdb_unlock_free_bucket(tdb, b_off);
        return ecode;
}

static size_t adjust_size(size_t keylen, size_t datalen)
{
        size_t size = keylen + datalen;

        if (size < TDB_MIN_DATA_LEN)
                size = TDB_MIN_DATA_LEN;

        /* Round to next uint64_t boundary. */
        return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}
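
/*
 * Illustration: the return line is the usual power-of-two round-up idiom,
 * adding (alignment - 1) and clearing the low bits.  Standalone check:
 *
 *   #include <assert.h>
 *   #include <stddef.h>
 *
 *   static size_t round_up8(size_t size)
 *   {
 *           return (size + 7) & ~(size_t)7;
 *   }
 *
 *   int main(void)
 *   {
 *           assert(round_up8(1) == 8);
 *           assert(round_up8(8) == 8);
 *           assert(round_up8(9) == 16);
 *           return 0;
 *   }
 */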

/* If we have enough left over to be useful, split that off. */
static size_t record_leftover(size_t keylen, size_t datalen,
                              bool want_extra, size_t total_len)
{
        ssize_t leftover;

        if (want_extra)
                datalen += datalen / 2;
        leftover = total_len - adjust_size(keylen, datalen);

        if (leftover < (ssize_t)sizeof(struct tdb_free_record))
                return 0;

        return leftover;
}
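
/*
 * Worked example (with made-up sizes, assuming TDB_MIN_DATA_LEN <= 80 and
 * sizeof(struct tdb_free_record) == 24): for keylen 10, datalen 70 and a
 * 128-byte candidate record, adjust_size() gives 80, so the leftover is
 * 48 >= 24 and record_leftover() returns 48 for splitting off.  With
 * want_extra, datalen becomes 105, adjust_size() gives 120, and the
 * 8-byte leftover is below the free-record minimum, so 0 is returned and
 * the whole record is used (the excess becomes padding).
 */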

static tdb_off_t ftable_offset(struct tdb_context *tdb, unsigned int ftable)
{
        tdb_off_t off;
        unsigned int i;

        if (likely(tdb->ftable == ftable))
                return tdb->ftable_off;

        off = first_ftable(tdb);
        for (i = 0; i < ftable; i++) {
                if (TDB_OFF_IS_ERR(off)) {
                        break;
                }
                off = next_ftable(tdb, off);
        }
        return off;
}

/* Note: we unlock the current bucket if we fail (-ve), or if coalescing
 * forces us to blatt either of the *protect records (in which case
 * *protect1 is set to an error code). */
static tdb_len_t coalesce(struct tdb_context *tdb,
                          tdb_off_t off, tdb_off_t b_off,
                          tdb_len_t data_len,
                          tdb_off_t *protect1,
                          tdb_off_t *protect2)
{
        tdb_off_t end;
        struct tdb_free_record rec;
        enum TDB_ERROR ecode;

        tdb->stats.alloc_coalesce_tried++;
        end = off + sizeof(struct tdb_used_record) + data_len;

        while (end < tdb->file->map_size) {
                const struct tdb_free_record *r;
                tdb_off_t nb_off;
                unsigned ftable, bucket;

                r = tdb_access_read(tdb, end, sizeof(*r), true);
                if (TDB_PTR_IS_ERR(r)) {
                        ecode = TDB_PTR_ERR(r);
                        goto err;
                }

                if (frec_magic(r) != TDB_FREE_MAGIC
                    || frec_ftable(r) == TDB_FTABLE_NONE) {
                        tdb_access_release(tdb, r);
                        break;
                }

                ftable = frec_ftable(r);
                bucket = size_to_bucket(frec_len(r));
                nb_off = ftable_offset(tdb, ftable);
                if (TDB_OFF_IS_ERR(nb_off)) {
                        tdb_access_release(tdb, r);
                        ecode = nb_off;
                        goto err;
                }
                nb_off = bucket_off(nb_off, bucket);
                tdb_access_release(tdb, r);

                /* We may be violating lock order here, so best effort. */
                if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT)
                    != TDB_SUCCESS) {
                        tdb->stats.alloc_coalesce_lockfail++;
                        break;
                }

                /* Now we have lock, re-check. */
                ecode = tdb_read_convert(tdb, end, &rec, sizeof(rec));
                if (ecode != TDB_SUCCESS) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        goto err;
                }

                if (unlikely(frec_magic(&rec) != TDB_FREE_MAGIC)) {
                        tdb->stats.alloc_coalesce_race++;
                        tdb_unlock_free_bucket(tdb, nb_off);
                        break;
                }

                if (unlikely(frec_ftable(&rec) != ftable)
                    || unlikely(size_to_bucket(frec_len(&rec)) != bucket)) {
                        tdb->stats.alloc_coalesce_race++;
                        tdb_unlock_free_bucket(tdb, nb_off);
                        break;
                }

                /* Did we just mess up a record you were hoping to use? */
                if (end == *protect1 || end == *protect2)
                        *protect1 = TDB_ERR_NOEXIST;

                ecode = remove_from_list(tdb, nb_off, end, &rec);
                check_list(tdb, nb_off);
                if (ecode != TDB_SUCCESS) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        goto err;
                }

                end += sizeof(struct tdb_used_record) + frec_len(&rec);
                tdb_unlock_free_bucket(tdb, nb_off);
                tdb->stats.alloc_coalesce_num_merged++;
        }

        /* Didn't find any adjacent free? */
        if (end == off + sizeof(struct tdb_used_record) + data_len)
                return 0;

        /* Before we expand, check whether this is one you wanted protected. */
        if (off == *protect1 || off == *protect2)
                *protect1 = TDB_ERR_EXISTS;

        /* OK, expand initial record */
        ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
        if (ecode != TDB_SUCCESS) {
                goto err;
        }

        if (frec_len(&rec) != data_len) {
                ecode = tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
                                   "coalesce: expected data len %zu not %zu",
                                   (size_t)data_len, (size_t)frec_len(&rec));
                goto err;
        }

        ecode = remove_from_list(tdb, b_off, off, &rec);
        check_list(tdb, b_off);
        if (ecode != TDB_SUCCESS) {
                goto err;
        }

        /* Try the lock-order-violating NOWAIT add first... */
        ecode = add_free_record(tdb, off, end - off, TDB_LOCK_NOWAIT);
        if (ecode != TDB_SUCCESS) {
                /* Need to drop lock.  Can't rely on anything stable. */
                *protect1 = TDB_ERR_CORRUPT;

                /* We have to drop this to avoid deadlocks, so make sure record
                 * doesn't get coalesced by someone else! */
                rec.ftable_and_len = (TDB_FTABLE_NONE
                                      << (64 - TDB_OFF_UPPER_STEAL))
                        | (end - off - sizeof(struct tdb_used_record));
                ecode = tdb_write_off(tdb,
                                      off + offsetof(struct tdb_free_record,
                                                     ftable_and_len),
                                      rec.ftable_and_len);
                if (ecode != TDB_SUCCESS) {
                        goto err;
                }

                tdb->stats.alloc_coalesce_succeeded++;
                tdb_unlock_free_bucket(tdb, b_off);

                ecode = add_free_record(tdb, off, end - off, TDB_LOCK_WAIT);
                if (ecode != TDB_SUCCESS) {
                        return ecode;
                }
        } else if (TDB_OFF_IS_ERR(*protect1)) {
                /* For simplicity, we always drop lock if they can't continue */
                tdb_unlock_free_bucket(tdb, b_off);
        }

        /* Return usable length. */
        return end - off - sizeof(struct tdb_used_record);

err:
        /* To unify error paths, we *always* unlock bucket on error. */
        tdb_unlock_free_bucket(tdb, b_off);
        return ecode;
}
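
/*
 * Walkthrough (with made-up offsets, assuming a 16-byte
 * struct tdb_used_record): for off = 0x1000 and data_len = 0x100, the
 * scan starts at end = 0x1110.  If a free record with frec_len 0xf0 sits
 * there, the loop locks that record's bucket, re-checks it under the
 * lock, unlinks it, and advances end to 0x1110 + 0x10 + 0xf0 = 0x1210.
 * When the next neighbour is in use (or its lock can't be taken), the
 * loop stops; the record at 0x1000 is then unlinked from its own bucket
 * and re-added covering [0x1000, 0x1210), and the function returns the
 * new usable length 0x1210 - 0x1000 - 0x10 = 0x200.
 */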

/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
                                tdb_off_t ftable_off,
                                tdb_off_t bucket,
                                size_t keylen, size_t datalen,
                                bool want_extra,
                                unsigned magic,
                                unsigned hashlow)
{
        tdb_off_t off, b_off, best_off;
        struct tdb_free_record best = { 0 };
        double multiplier;
        bool coalesce_after_best = false; /* Damn GCC warning! */
        size_t size = adjust_size(keylen, datalen);
        enum TDB_ERROR ecode;

        tdb->stats.allocs++;
again:
        b_off = bucket_off(ftable_off, bucket);

        /* FIXME: Try non-blocking wait first, to measure contention. */
        /* Lock this bucket. */
        ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        best.ftable_and_len = -1ULL;
        best_off = 0;

        /* Get slack if we're after extra. */
        if (want_extra)
                multiplier = 1.5;
        else
                multiplier = 1.0;

        /* Walk the list to see if any are large enough, getting less fussy
         * as we go. */
        off = tdb_read_off(tdb, b_off);
        if (TDB_OFF_IS_ERR(off)) {
                ecode = off;
                goto unlock_err;
        }

        while (off) {
                const struct tdb_free_record *r;
                tdb_len_t len, coal;
                tdb_off_t next;

                r = tdb_access_read(tdb, off, sizeof(*r), true);
                if (TDB_PTR_IS_ERR(r)) {
                        ecode = TDB_PTR_ERR(r);
                        goto unlock_err;
                }

                if (frec_magic(r) != TDB_FREE_MAGIC) {
                        ecode = tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
                                           "lock_and_alloc:"
                                           " %llu non-free 0x%llx",
                                           (long long)off,
                                           (long long)r->magic_and_prev);
                        tdb_access_release(tdb, r);
                        goto unlock_err;
                }

                if (frec_len(r) >= size && frec_len(r) < frec_len(&best)) {
                        best_off = off;
                        best = *r;
                        coalesce_after_best = false;
                }

                if (frec_len(&best) <= size * multiplier && best_off) {
                        tdb_access_release(tdb, r);
                        break;
                }

                multiplier *= 1.01;

                next = r->next;
                len = frec_len(r);
                tdb_access_release(tdb, r);

                /* Since we're going slow anyway, try coalescing here. */
                coal = coalesce(tdb, off, b_off, len, &best_off, &next);
                if (TDB_OFF_IS_ERR(coal)) {
                        /* This has already unlocked on error. */
                        return coal;
                }
                if (TDB_OFF_IS_ERR(best_off)) {
                        /* This has unlocked list, restart. */
                        goto again;
                }
                if (coal > 0)
                        coalesce_after_best = true;
                off = next;
        }

        /* If we found anything at all, use it. */
        if (best_off) {
                struct tdb_used_record rec;
                size_t leftover;

                /* If we coalesced, we might have changed prev/next ptrs. */
                if (coalesce_after_best) {
                        ecode = tdb_read_convert(tdb, best_off, &best,
                                                 sizeof(best));
                        if (ecode != TDB_SUCCESS)
                                goto unlock_err;
                }

                /* We're happy with this size: take it. */
                ecode = remove_from_list(tdb, b_off, best_off, &best);
                check_list(tdb, b_off);
                if (ecode != TDB_SUCCESS) {
                        goto unlock_err;
                }

                leftover = record_leftover(keylen, datalen, want_extra,
                                           frec_len(&best));

                assert(keylen + datalen + leftover <= frec_len(&best));
                /* We need to mark non-free before we drop lock, otherwise
                 * coalesce() could try to merge it! */
                ecode = set_header(tdb, &rec, magic, keylen, datalen,
                                   frec_len(&best) - leftover, hashlow);
                if (ecode != TDB_SUCCESS) {
                        goto unlock_err;
                }

                ecode = tdb_write_convert(tdb, best_off, &rec, sizeof(rec));
                if (ecode != TDB_SUCCESS) {
                        goto unlock_err;
                }

                /* For futureproofing, we put a 0 in any unused space. */
                if (rec_extra_padding(&rec)) {
                        ecode = tdb->methods->twrite(tdb, best_off + sizeof(rec)
                                                     + keylen + datalen, "", 1);
                        if (ecode != TDB_SUCCESS) {
                                goto unlock_err;
                        }
                }

                /* Bucket of leftover will be <= current bucket, so nested
                 * locking is allowed. */
                if (leftover) {
                        tdb->stats.alloc_leftover++;
                        ecode = add_free_record(tdb,
                                                best_off + sizeof(rec)
                                                + frec_len(&best) - leftover,
                                                leftover, TDB_LOCK_WAIT);
                        if (ecode != TDB_SUCCESS) {
                                best_off = ecode;
                        }
                }
                tdb_unlock_free_bucket(tdb, b_off);

                return best_off;
        }

        tdb_unlock_free_bucket(tdb, b_off);
        return 0;

unlock_err:
        tdb_unlock_free_bucket(tdb, b_off);
        return ecode;
}
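
/*
 * Illustration: the multiplier is what makes the walk "less fussy as we
 * go": the best fit is accepted once it is within size * multiplier, and
 * the bound loosens by 1% per record walked.  Standalone model of the
 * acceptance test (lengths and names are made up):
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           double mult = 1.0;              // 1.5 when want_extra
 *           unsigned size = 100;
 *           unsigned lens[] = { 400, 180, 104, 101 };
 *           unsigned best = ~0U;
 *
 *           for (unsigned i = 0; i < 4; i++) {
 *                   if (lens[i] >= size && lens[i] < best)
 *                           best = lens[i];
 *                   if (best <= size * mult) {      // fussiness test
 *                           printf("take %u after %u records\n", best, i + 1);
 *                           return 0;
 *                   }
 *                   mult *= 1.01;           // loosen 1% per record
 *           }
 *           printf("take best seen: %u\n", best);
 *           return 0;
 *   }
 */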

/* Get a free block from current free list, or 0 if none, -ve on error. */
static tdb_off_t get_free(struct tdb_context *tdb,
                          size_t keylen, size_t datalen, bool want_extra,
                          unsigned magic, unsigned hashlow)
{
        tdb_off_t off, ftable_off;
        tdb_off_t start_b, b, ftable;
        bool wrapped = false;

        /* If they are growing, add 50% to get to higher bucket. */
        if (want_extra)
                start_b = size_to_bucket(adjust_size(keylen,
                                                     datalen + datalen / 2));
        else
                start_b = size_to_bucket(adjust_size(keylen, datalen));

        ftable_off = tdb->ftable_off;
        ftable = tdb->ftable;
        while (!wrapped || ftable_off != tdb->ftable_off) {
                /* Start at exact size bucket, and search up... */
                for (b = find_free_head(tdb, ftable_off, start_b);
                     b < TDB_FREE_BUCKETS;
                     b = find_free_head(tdb, ftable_off, b + 1)) {
                        /* Try getting one from list. */
                        off = lock_and_alloc(tdb, ftable_off,
                                             b, keylen, datalen, want_extra,
                                             magic, hashlow);
                        if (TDB_OFF_IS_ERR(off))
                                return off;
                        if (off != 0) {
                                if (b == start_b)
                                        tdb->stats.alloc_bucket_exact++;
                                if (b == TDB_FREE_BUCKETS - 1)
                                        tdb->stats.alloc_bucket_max++;
                                /* Worked?  Stay using this list. */
                                tdb->ftable_off = ftable_off;
                                tdb->ftable = ftable;
                                return off;
                        }
                        /* Didn't work.  Try next bucket. */
                }

                if (TDB_OFF_IS_ERR(b)) {
                        return b;
                }

                /* Hmm, try next table. */
                ftable_off = next_ftable(tdb, ftable_off);
                if (TDB_OFF_IS_ERR(ftable_off)) {
                        return ftable_off;
                }
                ftable++;

                if (ftable_off == 0) {
                        wrapped = true;
                        ftable_off = first_ftable(tdb);
                        if (TDB_OFF_IS_ERR(ftable_off)) {
                                return ftable_off;
                        }
                        ftable = 0;
                }
        }

        return 0;
}
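
/*
 * In words: for a request landing in bucket N of the current free table,
 * the walk tries buckets N, N+1, ... up to TDB_FREE_BUCKETS - 1 of that
 * table (find_free_head() skips the empty ones), then moves to the next
 * free table and repeats, wrapping around to the first table until it is
 * back where it started.  Only when that full sweep finds nothing does
 * the caller fall back to tdb_expand().
 */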

enum TDB_ERROR set_header(struct tdb_context *tdb,
                          struct tdb_used_record *rec,
                          unsigned magic, uint64_t keylen, uint64_t datalen,
                          uint64_t actuallen, unsigned hashlow)
{
        uint64_t keybits = (fls64(keylen) + 1) / 2;

        /* Use bottom bits of hash, so it's independent of hash table size. */
        rec->magic_and_meta = (hashlow & ((1 << 11) - 1))
                | ((actuallen - (keylen + datalen)) << 11)
                | (keybits << 43)
                | ((uint64_t)magic << 48);
        rec->key_and_data_len = (keylen | (datalen << (keybits * 2)));

        /* Encoding can fail on big values. */
        if (rec_key_length(rec) != keylen
            || rec_data_length(rec) != datalen
            || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
                return tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
                                  "Could not encode k=%llu,d=%llu,a=%llu",
                                  (long long)keylen, (long long)datalen,
                                  (long long)actuallen);
        }
        return TDB_SUCCESS;
}
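
/*
 * Illustration: the layout packed above is, reading the shifts directly:
 *
 *   magic_and_meta bits  0..10: low 11 bits of the hash
 *                  bits 11..42: extra padding (actuallen - keylen - datalen)
 *                  bits 43..47: keybits
 *                  bits 48..63: magic
 *   key_and_data_len: keylen in the low keybits*2 bits, datalen above.
 *
 * A standalone round-trip with made-up values (0x60 is a hypothetical
 * magic; the decode recovers the same fields the checks above verify via
 * rec_key_length() and friends):
 *
 *   #include <assert.h>
 *   #include <stdint.h>
 *
 *   int main(void)
 *   {
 *           uint64_t keylen = 5, datalen = 100, pad = 3, hash = 0x7ff;
 *           uint64_t magic = 0x60;
 *           uint64_t keybits = (3 + 1) / 2;   // fls64(5) == 3 -> keybits 2
 *           uint64_t meta = (hash & 0x7ff) | (pad << 11)
 *                   | (keybits << 43) | (magic << 48);
 *           uint64_t kd = keylen | (datalen << (keybits * 2));
 *
 *           uint64_t kb = (meta >> 43) & 0x1f;
 *           assert((kd & ((1ULL << (kb * 2)) - 1)) == keylen);
 *           assert((kd >> (kb * 2)) == datalen);
 *           assert(((meta >> 11) & 0xffffffffULL) == pad);
 *           assert(meta >> 48 == magic);
 *           return 0;
 *   }
 */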

/* Expand the database. */
static enum TDB_ERROR tdb_expand(struct tdb_context *tdb, tdb_len_t size)
{
        uint64_t old_size, rec_size, map_size;
        tdb_len_t wanted;
        enum TDB_ERROR ecode;

        /* Need to hold a hash lock to expand DB: transactions rely on it. */
        if (!(tdb->flags & TDB_NOLOCK)
            && !tdb->file->allrecord_lock.count && !tdb_has_hash_locks(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_expand: must hold lock during expand");
        }

        /* Only one person can expand file at a time. */
        ecode = tdb_lock_expand(tdb, F_WRLCK);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        /* Someone else may have expanded the file, so retry. */
        old_size = tdb->file->map_size;
        tdb->methods->oob(tdb, tdb->file->map_size + 1, true);
        if (tdb->file->map_size != old_size) {
                tdb_unlock_expand(tdb, F_WRLCK);
                return TDB_SUCCESS;
        }

        /* Limit the record-based sizing, to avoid using up huge amounts of
         * memory for in-memory tdbs if an oddball huge record creeps in. */
        if (size > 100 * 1024) {
                rec_size = size * 2;
        } else {
                rec_size = size * 100;
        }

        /* Always make room for at least rec_size more record space, and at
         * least 25% more space if the DB is smaller than 100MiB; otherwise
         * grow it by 10% only. */
        if (old_size > 100 * 1024 * 1024) {
                map_size = old_size / 10;
        } else {
                map_size = old_size / 4;
        }

        if (map_size > rec_size) {
                wanted = map_size;
        } else {
                wanted = rec_size;
        }

        /* We need room for the record header too. */
        wanted = adjust_size(0, sizeof(struct tdb_used_record) + wanted);

        ecode = tdb->methods->expand_file(tdb, wanted);
        if (ecode != TDB_SUCCESS) {
                tdb_unlock_expand(tdb, F_WRLCK);
                return ecode;
        }

        /* We need to drop this lock before adding free record. */
        tdb_unlock_expand(tdb, F_WRLCK);

        tdb->stats.expands++;
        return add_free_record(tdb, old_size, wanted, TDB_LOCK_WAIT);
}
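
/*
 * Worked example: for a 1 KiB request on a 10 MiB file, rec_size =
 * 1024 * 100 = 100 KiB (small-request multiplier) and map_size =
 * 10 MiB / 4 = 2.5 MiB (the <= 100 MiB case), so wanted is 2.5 MiB plus
 * header and rounding.  The same request on a 1 GiB file gives map_size =
 * 1 GiB / 10, about 102 MiB, instead.
 */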

/* This won't fail: it will expand the database if it has to. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
                uint64_t hash, unsigned magic, bool growing)
{
        tdb_off_t off;

        /* We can't hold pointers during this: we could unmap! */
        assert(!tdb->direct_access);

        for (;;) {
                enum TDB_ERROR ecode;
                off = get_free(tdb, keylen, datalen, growing, magic, hash);
                if (likely(off != 0))
                        break;

                ecode = tdb_expand(tdb, adjust_size(keylen, datalen));
                if (ecode != TDB_SUCCESS) {
                        return ecode;
                }
        }

        return off;
}