/*
   Trivial Database 2: free list/block handling
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <ccan/likely/likely.h>
#include <time.h>
#include <assert.h>
#include <limits.h>

/* We have to be able to fit a free record here. */
#define MIN_DATA_LEN    \
        (sizeof(struct tdb_free_record) - sizeof(struct tdb_used_record))

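/* Find last (most significant) bit set; 1-based, so fls64(0) == 0.
 * For example: fls64(1) == 1, fls64(8) == 4, fls64(1ULL << 63) == 64. */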
static unsigned fls64(uint64_t val)
{
#if HAVE_BUILTIN_CLZL
        if (val <= ULONG_MAX) {
                /* This is significantly faster! */
                return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
        } else {
#endif
        uint64_t r = 64;

        if (!val)
                return 0;
        if (!(val & 0xffffffff00000000ull)) {
                val <<= 32;
                r -= 32;
        }
        if (!(val & 0xffff000000000000ull)) {
                val <<= 16;
                r -= 16;
        }
        if (!(val & 0xff00000000000000ull)) {
                val <<= 8;
                r -= 8;
        }
        if (!(val & 0xf000000000000000ull)) {
                val <<= 4;
                r -= 4;
        }
        if (!(val & 0xc000000000000000ull)) {
                val <<= 2;
                r -= 2;
        }
        if (!(val & 0x8000000000000000ull)) {
                val <<= 1;
                r -= 1;
        }
        return r;
#if HAVE_BUILTIN_CLZL
        }
#endif
}

/* In which bucket would we find a particular record size? (ignoring header) */
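/* For example: data_len == MIN_DATA_LEN lands in bucket 0,
 * MIN_DATA_LEN + 8 in bucket 1, ... MIN_DATA_LEN + 64 in bucket 8;
 * past that, MIN_DATA_LEN + 65 lands in fls64(65) + 2 == 9. */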
unsigned int size_to_bucket(unsigned int zone_bits, tdb_len_t data_len)
{
        unsigned int bucket;

        /* We can't have records smaller than this. */
        assert(data_len >= MIN_DATA_LEN);

        /* Ignoring the header... */
        if (data_len - MIN_DATA_LEN <= 64) {
                /* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
                bucket = (data_len - MIN_DATA_LEN) / 8;
        } else {
                /* After that we go power of 2. */
                bucket = fls64(data_len - MIN_DATA_LEN) + 2;
        }

        if (unlikely(bucket > BUCKETS_FOR_ZONE(zone_bits)))
                bucket = BUCKETS_FOR_ZONE(zone_bits);
        return bucket;
}

/* Subtract 1-byte tailer and header.  Then round up to next power of 2. */
static unsigned max_zone_bits(struct tdb_context *tdb)
{
        return fls64(tdb->map_size-1-sizeof(struct tdb_header)-1) + 1;
}

/* Start by using a random zone to spread the load: returns the offset. */
static uint64_t random_zone(struct tdb_context *tdb)
{
        struct free_zone_header zhdr;
        tdb_off_t off = sizeof(struct tdb_header);
        tdb_len_t half_bits;
        uint64_t randbits = 0;
        unsigned int i;

        for (i = 0; i < 64; i += fls64(RAND_MAX))
                randbits ^= ((uint64_t)random()) << i;

        /* FIXME: Does this work?  Test! */
        half_bits = max_zone_bits(tdb) - 1;
        do {
                /* Pick left or right side (not outside file) */
                if ((randbits & 1)
                    && !tdb->methods->oob(tdb, off + (1ULL << half_bits)
                                          + sizeof(zhdr), true)) {
                        off += 1ULL << half_bits;
                }
                randbits >>= 1;

                if (tdb_read_convert(tdb, off, &zhdr, sizeof(zhdr)) == -1)
                        return TDB_OFF_ERR;

                if (zhdr.zone_bits == half_bits)
                        return off;

                half_bits--;
        } while (half_bits >= INITIAL_ZONE_BITS);

        tdb->ecode = TDB_ERR_CORRUPT;
        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                 "random_zone: zone at %llu smaller than %u bits?",
                 (long long)off, INITIAL_ZONE_BITS);
        return TDB_OFF_ERR;
}

int tdb_zone_init(struct tdb_context *tdb)
{
        tdb->zone_off = random_zone(tdb);
        if (tdb->zone_off == TDB_OFF_ERR)
                return -1;
        if (tdb_read_convert(tdb, tdb->zone_off,
                             &tdb->zhdr, sizeof(tdb->zhdr)) == -1)
                return -1;
        return 0;
}

/* Where's the header, given a zone size of 1 << zone_bits? */
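/* For example, with zone_bits == 16, any offset in
 * [header + 0x10000, header + 0x1ffff] maps back to header + 0x10000. */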
static tdb_off_t zone_off(tdb_off_t off, unsigned int zone_bits)
{
        off -= sizeof(struct tdb_header);
        return (off & ~((1ULL << zone_bits) - 1)) + sizeof(struct tdb_header);
}

/* Offset of a given bucket. */
/* FIXME: bucket can be "unsigned" everywhere, or even uint8/16. */
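/* A zone is laid out as (following overhead() and tdb_expand() below):
 *      struct free_zone_header
 *      tdb_off_t bucket_heads[BUCKETS_FOR_ZONE(zone_bits) + 1]
 *      ...used and free records... */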
tdb_off_t bucket_off(tdb_off_t zone_off, tdb_off_t bucket)
{
        return zone_off
                + sizeof(struct free_zone_header)
                + bucket * sizeof(tdb_off_t);
}

/* Returns the first bucket from "bucket" on with a non-empty list, or
 * BUCKETS_FOR_ZONE(zone_bits) + 1 if all the remaining lists are empty. */
static tdb_off_t find_free_head(struct tdb_context *tdb, tdb_off_t bucket)
{
        /* Speculatively search for a non-zero bucket. */
        return tdb_find_nonzero_off(tdb, bucket_off(tdb->zone_off, 0),
                                    bucket,
                                    BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits) + 1);
}

/* Remove from free bucket. */
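/* Note the trick below: a bucket head is a bare tdb_off_t, so when the
 * record is at the front of the list we unlink it by writing r->next
 * straight over the head pointer. */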
static int remove_from_list(struct tdb_context *tdb,
                            tdb_off_t b_off, struct tdb_free_record *r)
{
        tdb_off_t off;

        /* Front of list? */
        if (r->prev == 0) {
                off = b_off;
        } else {
                off = r->prev + offsetof(struct tdb_free_record, next);
        }
        /* r->prev->next = r->next */
        if (tdb_write_off(tdb, off, r->next)) {
                return -1;
        }

        if (r->next != 0) {
                off = r->next + offsetof(struct tdb_free_record, prev);
                /* r->next->prev = r->prev */
                if (tdb_write_off(tdb, off, r->prev)) {
                        return -1;
                }
        }
        return 0;
}

/* Enqueue in this free bucket. */
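/* Records are pushed on the front of the list, so each bucket is
 * most-recently-freed first. */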
static int enqueue_in_free(struct tdb_context *tdb,
                           tdb_off_t b_off,
                           tdb_off_t off,
                           struct tdb_free_record *new)
{
        new->prev = 0;
        /* new->next = head. */
        new->next = tdb_read_off(tdb, b_off);
        if (new->next == TDB_OFF_ERR)
                return -1;

        if (new->next) {
                /* next->prev = new. */
                if (tdb_write_off(tdb, new->next
                                  + offsetof(struct tdb_free_record, prev),
                                  off) != 0)
                        return -1;
        }
        /* head = new */
        if (tdb_write_off(tdb, b_off, off) != 0)
                return -1;

        return tdb_write_convert(tdb, off, new, sizeof(*new));
}

/* List need not be locked. */
int add_free_record(struct tdb_context *tdb,
                    unsigned int zone_bits,
                    tdb_off_t off, tdb_len_t len_with_header)
{
        struct tdb_free_record new;
        tdb_off_t b_off;
        int ret;

        assert(len_with_header >= sizeof(new));
        assert(zone_bits < (1 << 6));

        new.magic_and_meta = TDB_FREE_MAGIC | zone_bits;
        new.data_len = len_with_header - sizeof(struct tdb_used_record);

        b_off = bucket_off(zone_off(off, zone_bits),
                           size_to_bucket(zone_bits, new.data_len));
        if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) != 0)
                return -1;

        ret = enqueue_in_free(tdb, b_off, off, &new);
        tdb_unlock_free_bucket(tdb, b_off);
        return ret;
}

/* If we have enough left over to be useful, split that off. */
static int to_used_record(struct tdb_context *tdb,
                          unsigned int zone_bits,
                          tdb_off_t off,
                          tdb_len_t needed,
                          tdb_len_t total_len,
                          tdb_len_t *actual)
{
        struct tdb_used_record used;
        tdb_len_t leftover;

        leftover = total_len - needed;
        if (leftover < sizeof(struct tdb_free_record))
                leftover = 0;

        *actual = total_len - leftover;

        if (leftover) {
                if (add_free_record(tdb, zone_bits,
                                    off + sizeof(used) + *actual,
                                    total_len - needed))
                        return -1;
        }
        return 0;
}

/* Note: we unlock the current bucket if we coalesce or fail. */
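/* Returns 1 if we coalesced (b_off unlocked, merged record back on a
 * free list), -1 on error (b_off unlocked), 0 if nothing adjacent was
 * free (b_off still locked). */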
static int coalesce(struct tdb_context *tdb,
                    tdb_off_t zone_off, unsigned zone_bits,
                    tdb_off_t off, tdb_off_t b_off, tdb_len_t data_len)
{
        struct tdb_free_record pad, *r;
        tdb_off_t end = off + sizeof(struct tdb_used_record) + data_len;

        while (end < (zone_off + (1ULL << zone_bits))) {
                tdb_off_t nb_off;

                /* FIXME: do tdb_get here and below really win? */
                r = tdb_get(tdb, end, &pad, sizeof(pad));
                if (!r)
                        goto err;

                if (frec_magic(r) != TDB_FREE_MAGIC)
                        break;

                nb_off = bucket_off(zone_off,
                                    size_to_bucket(zone_bits, r->data_len));

                /* We may be violating lock order here, so best effort. */
                if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT) == -1)
                        break;

                /* Now we have lock, re-check. */
                r = tdb_get(tdb, end, &pad, sizeof(pad));
                if (!r) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        goto err;
                }

                if (unlikely(frec_magic(r) != TDB_FREE_MAGIC)) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        break;
                }

                if (remove_from_list(tdb, nb_off, r) == -1) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        goto err;
                }

                end += sizeof(struct tdb_used_record) + r->data_len;
                tdb_unlock_free_bucket(tdb, nb_off);
        }

        /* Didn't find any adjacent free? */
        if (end == off + sizeof(struct tdb_used_record) + data_len)
                return 0;

        /* OK, expand record */
        r = tdb_get(tdb, off, &pad, sizeof(pad));
        if (!r)
                goto err;

        if (r->data_len != data_len) {
                tdb->ecode = TDB_ERR_CORRUPT;
                tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                         "coalesce: expected data len %llu not %llu\n",
                         (long long)data_len, (long long)r->data_len);
                goto err;
        }

        if (remove_from_list(tdb, b_off, r) == -1)
                goto err;

        /* We have to drop this to avoid deadlocks. */
        tdb_unlock_free_bucket(tdb, b_off);

        if (add_free_record(tdb, zone_bits, off, end - off) == -1)
                return -1;
        return 1;

err:
        /* To unify error paths, we *always* unlock bucket on error. */
        tdb_unlock_free_bucket(tdb, b_off);
        return -1;
}

/* We need size bytes to put our key and data in. */
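/* Strategy: walk the bucket's list remembering the best fit so far;
 * the multiplier relaxes by 1% per record examined, so a long search
 * settles for a looser fit rather than walking forever. */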
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
                                tdb_off_t zone_off,
                                unsigned zone_bits,
                                tdb_off_t bucket,
                                size_t size,
                                tdb_len_t *actual)
{
        tdb_off_t off, b_off, best_off;
        struct tdb_free_record pad, best = { 0 }, *r;
        double multiplier;

again:
        b_off = bucket_off(zone_off, bucket);

        /* Lock this bucket. */
        if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == -1) {
                return TDB_OFF_ERR;
        }

        best.data_len = -1ULL;
        best_off = 0;
        /* FIXME: Start with larger multiplier if we're growing. */
        multiplier = 1.0;

        /* Walk the list to see if any are large enough, getting less fussy
         * as we go. */
        off = tdb_read_off(tdb, b_off);
        if (unlikely(off == TDB_OFF_ERR))
                goto unlock_err;

        while (off) {
                /* FIXME: Does tdb_get win anything here? */
                r = tdb_get(tdb, off, &pad, sizeof(*r));
                if (!r)
                        goto unlock_err;

                if (frec_magic(r) != TDB_FREE_MAGIC) {
                        tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                                 "lock_and_alloc: %llu non-free 0x%llx\n",
                                 (long long)off, (long long)r->magic_and_meta);
                        goto unlock_err;
                }

                if (r->data_len >= size && r->data_len < best.data_len) {
                        best_off = off;
                        best = *r;
                }

                if (best.data_len < size * multiplier && best_off)
                        goto use_best;

                multiplier *= 1.01;

                /* Since we're going slow anyway, try coalescing here. */
                switch (coalesce(tdb, zone_off, zone_bits, off, b_off,
                                 r->data_len)) {
                case -1:
                        /* This has already unlocked on error. */
                        return TDB_OFF_ERR;
                case 1:
                        /* This has unlocked list, restart. */
                        goto again;
                }
                off = r->next;
        }

        /* If we found anything at all, use it. */
        if (best_off) {
        use_best:
                /* We're happy with this size: take it. */
                if (remove_from_list(tdb, b_off, &best) != 0)
                        goto unlock_err;
                tdb_unlock_free_bucket(tdb, b_off);

                if (to_used_record(tdb, zone_bits, best_off, size,
                                   best.data_len, actual)) {
                        return TDB_OFF_ERR;
                }
                return best_off;
        }

        tdb_unlock_free_bucket(tdb, b_off);
        return 0;

unlock_err:
        tdb_unlock_free_bucket(tdb, b_off);
        return TDB_OFF_ERR;
}

static bool next_zone(struct tdb_context *tdb)
{
        tdb_off_t next = tdb->zone_off + (1ULL << tdb->zhdr.zone_bits);

        /* We must have a header. */
        if (tdb->methods->oob(tdb, next + sizeof(tdb->zhdr), true))
                return false;

        tdb->zone_off = next;
        return tdb_read_convert(tdb, next, &tdb->zhdr, sizeof(tdb->zhdr)) == 0;
}

/* Offset returned is within current zone (which it may alter). */
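/* Returns the offset of a suitable record, 0 if every zone was tried
 * without success (the caller then expands), or TDB_OFF_ERR on error. */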
static tdb_off_t get_free(struct tdb_context *tdb, size_t size,
                          tdb_len_t *actual)
{
        tdb_off_t start_zone = tdb->zone_off, off;
        bool wrapped = false;

        while (!wrapped || tdb->zone_off != start_zone) {
                tdb_off_t b;

                /* Shortcut for really huge allocations: don't bother
                 * searching a zone which can't possibly hold it. */
                if ((size >> tdb->zhdr.zone_bits) == 0) {
                        /* Start at exact size bucket, and search up... */
                        b = size_to_bucket(tdb->zhdr.zone_bits, size);
                        for (b = find_free_head(tdb, b);
                             b <= BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits);
                             b = find_free_head(tdb, b + 1)) {
                                /* Try getting one from list. */
                                off = lock_and_alloc(tdb, tdb->zone_off,
                                                     tdb->zhdr.zone_bits,
                                                     b, size, actual);
                                if (off == TDB_OFF_ERR)
                                        return TDB_OFF_ERR;
                                if (off != 0)
                                        return off;
                                /* Didn't work.  Try next bucket. */
                        }
                }

                /* Didn't work, try next zone, if it exists. */
                if (!next_zone(tdb)) {
                        wrapped = true;
                        tdb->zone_off = sizeof(struct tdb_header);
                        if (tdb_read_convert(tdb, tdb->zone_off,
                                             &tdb->zhdr, sizeof(tdb->zhdr))) {
                                return TDB_OFF_ERR;
                        }
                }
        }
        return 0;
}

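/* Bit layout of magic_and_meta, as read off the shifts below:
 *      bits 0-5:   zone_bits
 *      bits 6-10:  bottom 5 bits of the hash
 *      bits 11-42: extra padding (actuallen - (keylen + datalen))
 *      bits 43-47: keybits (keylen is encoded in 2*keybits bits)
 *      bits 48 up: TDB_MAGIC */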
int set_header(struct tdb_context *tdb,
               struct tdb_used_record *rec,
               uint64_t keylen, uint64_t datalen,
               uint64_t actuallen, uint64_t hash,
               unsigned int zone_bits)
{
        uint64_t keybits = (fls64(keylen) + 1) / 2;

        /* Use bottom bits of hash, so it's independent of hash table size. */
        rec->magic_and_meta
                = zone_bits
                | ((hash & ((1 << 5)-1)) << 6)
                | ((actuallen - (keylen + datalen)) << 11)
                | (keybits << 43)
                | (TDB_MAGIC << 48);
        rec->key_and_data_len = (keylen | (datalen << (keybits*2)));

        /* Encoding can fail on big values. */
        if (rec_key_length(rec) != keylen
            || rec_data_length(rec) != datalen
            || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
                tdb->ecode = TDB_ERR_IO;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "Could not encode k=%llu,d=%llu,a=%llu\n",
                         (long long)keylen, (long long)datalen,
                         (long long)actuallen);
                return -1;
        }
        return 0;
}

static bool zones_happy(struct tdb_context *tdb)
{
        /* FIXME: look at distribution of zones. */
        return true;
}

/* Assume we want buckets up to the comfort factor. */
static tdb_len_t overhead(unsigned int zone_bits)
{
        return sizeof(struct free_zone_header)
                + (BUCKETS_FOR_ZONE(zone_bits) + 1) * sizeof(tdb_off_t);
}

/* Expand the database (by adding a zone). */
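/* The file always ends in a one-byte tailer holding the last zone's
 * zone_bits.  Expansion appends a whole new zone: its header
 * overwrites the old tailer, and a fresh tailer is written at the new
 * end of file. */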
static int tdb_expand(struct tdb_context *tdb, tdb_len_t size)
{
        uint64_t old_size;
        tdb_off_t off;
        uint8_t zone_bits;
        unsigned int num_buckets;
        tdb_len_t wanted;
        struct free_zone_header zhdr;
        bool enlarge_zone;

        /* We need room for the record header too. */
        wanted = sizeof(struct tdb_used_record) + size;

        /* Only one person can expand file at a time. */
        if (tdb_lock_expand(tdb, F_WRLCK) != 0)
                return -1;

        /* Someone else may have expanded the file, so retry. */
        old_size = tdb->map_size;
        tdb->methods->oob(tdb, tdb->map_size + 1, true);
        if (tdb->map_size != old_size)
                goto success;

        /* The zone-bits tailer byte is protected by the EXPAND lock. */
        if (tdb->methods->read(tdb, old_size - 1, &zone_bits, 1) == -1)
                goto fail;

        /* If zones aren't working well, add larger zone if possible. */
        enlarge_zone = !zones_happy(tdb);

        /* The new zone can keep the last zone's zone_bits, or grow if
         * the file end is on the right boundary. */
        for (;;) {
                /* Does this fit the allocation comfortably? */
                if ((1ULL << zone_bits) >= overhead(zone_bits) + wanted) {
                        /* Only let enlarge_zone enlarge us once. */
                        if (!enlarge_zone)
                                break;
                        enlarge_zone = false;
                }
                if ((old_size - 1 - sizeof(struct tdb_header))
                    & (1ULL << zone_bits))
                        break;
                zone_bits++;
        }

        zhdr.zone_bits = zone_bits;
        num_buckets = BUCKETS_FOR_ZONE(zone_bits);

        if (tdb->methods->expand_file(tdb, 1ULL << zone_bits) == -1)
                goto fail;

        /* Write new tailer. */
        if (tdb->methods->write(tdb, tdb->map_size - 1, &zone_bits, 1) == -1)
                goto fail;

        /* Write new zone header (over the old tailer). */
        off = old_size - 1;
        if (tdb_write_convert(tdb, off, &zhdr, sizeof(zhdr)) == -1)
                goto fail;

        /* Now write empty buckets. */
        off += sizeof(zhdr);
        if (zero_out(tdb, off, (num_buckets+1) * sizeof(tdb_off_t)) == -1)
                goto fail;
        off += (num_buckets+1) * sizeof(tdb_off_t);

        /* Now add the rest as our free record. */
        if (add_free_record(tdb, zone_bits, off, tdb->map_size-1-off) == -1)
                goto fail;

        /* Try allocating from this zone now. */
        tdb->zone_off = old_size - 1;
        tdb->zhdr = zhdr;

success:
        tdb_unlock_expand(tdb, F_WRLCK);
        return 0;

fail:
        tdb_unlock_expand(tdb, F_WRLCK);
        return -1;
}

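/* For example: keylen 10 + datalen 22 gives size 32, already a
 * multiple of 8 (assuming 32 clears MIN_DATA_LEN); with growing, we
 * ask for 32 + 22/2 = 43, rounded up to 48. */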
static tdb_len_t adjust_size(size_t keylen, size_t datalen, bool growing)
{
        tdb_len_t size = keylen + datalen;

        if (size < MIN_DATA_LEN)
                size = MIN_DATA_LEN;

        /* Overallocate if this is coming from an enlarging store. */
        if (growing)
                size += datalen / 2;

        /* Round to next uint64_t boundary. */
        return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}

/* This won't run out of room: it expands the database if it has to
 * (though it can still fail on I/O or encoding errors). */
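/*
 * A sketch of the caller's side (hypothetical fragment; the real
 * callers live elsewhere in tdb2):
 *
 *      off = alloc(tdb, key.dsize, data.dsize, hash, false);
 *      if (off == TDB_OFF_ERR)
 *              ...fail...
 *      (key and data then go at off + sizeof(struct tdb_used_record))
 */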
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
                uint64_t hash, bool growing)
{
        tdb_off_t off;
        tdb_len_t size, actual;
        struct tdb_used_record rec;

        /* We can't hold pointers during this: we could unmap! */
        assert(!tdb->direct_access);

        size = adjust_size(keylen, datalen, growing);

again:
        off = get_free(tdb, size, &actual);
        if (unlikely(off == TDB_OFF_ERR))
                return off;

        if (unlikely(off == 0)) {
                if (tdb_expand(tdb, size) == -1)
                        return TDB_OFF_ERR;
                goto again;
        }

        /* Some supergiant values can't be encoded. */
        /* FIXME: Check before, and limit actual in get_free. */
        if (set_header(tdb, &rec, keylen, datalen, actual, hash,
                       tdb->zhdr.zone_bits) != 0) {
                add_free_record(tdb, tdb->zhdr.zone_bits, off,
                                sizeof(rec) + actual);
                return TDB_OFF_ERR;
        }

        if (tdb_write_convert(tdb, off, &rec, sizeof(rec)) != 0)
                return TDB_OFF_ERR;

        return off;
}