/*
   Trivial Database 2: free list/block handling
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <ccan/likely/likely.h>
#include <ccan/ilog/ilog.h>
#include <time.h>
#include <assert.h>
#include <limits.h>

static unsigned fls64(uint64_t val)
{
        return ilog64(val);
}
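
/* Per ccan/ilog, ilog64(val) returns the number of bits needed to hold
 * val (ilog64(0) == 0, ilog64(1) == 1, ilog64(64) == 7), so this fls64()
 * is "find last set" with bit positions counted from 1. */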

/* In which bucket would we find a particular record size? (ignoring header) */
unsigned int size_to_bucket(unsigned int zone_bits, tdb_len_t data_len)
{
        unsigned int bucket;

        /* We can't have records smaller than this. */
        assert(data_len >= TDB_MIN_DATA_LEN);

        /* Ignoring the header... */
        if (data_len - TDB_MIN_DATA_LEN <= 64) {
                /* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
                bucket = (data_len - TDB_MIN_DATA_LEN) / 8;
        } else {
                /* After that we go power of 2. */
                bucket = fls64(data_len - TDB_MIN_DATA_LEN) + 2;
        }

        if (unlikely(bucket > BUCKETS_FOR_ZONE(zone_bits)))
                bucket = BUCKETS_FOR_ZONE(zone_bits);
        return bucket;
}
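
/* Illustrative mapping, with delta = data_len - TDB_MIN_DATA_LEN:
 *   linear part:  delta 0..7 -> bucket 0, 8..15 -> 1, ... 64 -> 8
 *   power of 2:   delta 65..127 -> bucket 9 (fls64 == 7),
 *                 delta 128..255 -> bucket 10, and so on,
 * all capped at BUCKETS_FOR_ZONE(zone_bits). */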

/* Subtract 1-byte tailer and header.  Then round up to next power of 2. */
static unsigned max_zone_bits(struct tdb_context *tdb)
{
        return fls64(tdb->map_size-1-sizeof(struct tdb_header)-1) + 1;
}

/* Start by using a random zone to spread the load: returns the offset. */
static uint64_t random_zone(struct tdb_context *tdb)
{
        struct free_zone_header zhdr;
        tdb_off_t off = sizeof(struct tdb_header);
        tdb_len_t half_bits;
        uint64_t randbits = 0;
        unsigned int i;

        for (i = 0; i < 64; i += fls64(RAND_MAX))
                randbits ^= ((uint64_t)random()) << i;

        /* FIXME: Does this work?  Test! */
        half_bits = max_zone_bits(tdb) - 1;
        do {
                /* Pick left or right side (not outside file) */
                if ((randbits & 1)
                    && !tdb->methods->oob(tdb, off + (1ULL << half_bits)
                                          + sizeof(zhdr), true)) {
                        off += 1ULL << half_bits;
                }
                randbits >>= 1;

                if (tdb_read_convert(tdb, off, &zhdr, sizeof(zhdr)) == -1)
                        return TDB_OFF_ERR;

                if (zhdr.zone_bits == half_bits)
                        return off;

                half_bits--;
        } while (half_bits >= INITIAL_ZONE_BITS);

        tdb->ecode = TDB_ERR_CORRUPT;
        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                 "random_zone: zone at %llu smaller than %u bits?",
                 (long long)off, INITIAL_ZONE_BITS);
        return TDB_OFF_ERR;
}
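
/* Note on the loop above: each step either stays put or hops forward by
 * 2^half_bits (only when the hop stays inside the file), consuming one
 * random bit, then checks whether a zone of exactly half_bits size starts
 * at the current offset.  half_bits shrinks each iteration; if it drops
 * below INITIAL_ZONE_BITS without a match, the file is deemed corrupt. */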

int tdb_zone_init(struct tdb_context *tdb)
{
        tdb->zone_off = random_zone(tdb);
        if (tdb->zone_off == TDB_OFF_ERR)
                return -1;
        if (tdb_read_convert(tdb, tdb->zone_off,
                             &tdb->zhdr, sizeof(tdb->zhdr)) == -1)
                return -1;
        return 0;
}

/* Where's the header, given a zone size of 1 << zone_bits? */
static tdb_off_t zone_off(tdb_off_t off, unsigned int zone_bits)
{
        off -= sizeof(struct tdb_header);
        return (off & ~((1ULL << zone_bits) - 1)) + sizeof(struct tdb_header);
}
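
/* Example (illustrative): with zone_bits == 16 and H == sizeof(struct
 * tdb_header), offset H + 0x12345 masks down to H + 0x10000: zones are
 * aligned to their size, measured from the end of the tdb header. */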

/* Offset of a given bucket. */
/* FIXME: bucket can be "unsigned" everywhere, or even uint8/16. */
tdb_off_t bucket_off(tdb_off_t zone_off, tdb_off_t bucket)
{
        return zone_off
                + sizeof(struct free_zone_header)
                + bucket * sizeof(tdb_off_t);
}

/* Returns free_buckets + 1, or list number to search. */
static tdb_off_t find_free_head(struct tdb_context *tdb, tdb_off_t bucket)
{
        /* Speculatively search for a non-zero bucket. */
        return tdb_find_nonzero_off(tdb, bucket_off(tdb->zone_off, 0),
                                    bucket,
                                    BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits) + 1);
}

/* Remove from free bucket. */
static int remove_from_list(struct tdb_context *tdb,
                            tdb_off_t b_off, struct tdb_free_record *r)
{
        tdb_off_t off;

        /* Front of list? */
        if (r->prev == 0) {
                off = b_off;
        } else {
                off = r->prev + offsetof(struct tdb_free_record, next);
        }
        /* r->prev->next = r->next */
        if (tdb_write_off(tdb, off, r->next)) {
                return -1;
        }

        if (r->next != 0) {
                off = r->next + offsetof(struct tdb_free_record, prev);
                /* r->next->prev = r->prev */
                if (tdb_write_off(tdb, off, r->prev)) {
                        return -1;
                }
        }
        return 0;
}
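
/* Picture (illustrative): each bucket is an on-disk doubly-linked list,
 * with the bucket word itself acting as the head pointer:
 *
 *   [b_off] -> A {prev=0, next=B} <-> B {prev=A, next=C} <-> C {...}
 *
 * Unlinking B writes A.next = C and C.prev = A.  prev == 0 marks the list
 * head, in which case the "previous next pointer" is the word at b_off. */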

/* Enqueue in this free bucket. */
static int enqueue_in_free(struct tdb_context *tdb,
                           tdb_off_t b_off,
                           tdb_off_t off,
                           struct tdb_free_record *new)
{
        new->prev = 0;
        /* new->next = head. */
        new->next = tdb_read_off(tdb, b_off);
        if (new->next == TDB_OFF_ERR)
                return -1;

        if (new->next) {
                /* next->prev = new. */
                if (tdb_write_off(tdb, new->next
                                  + offsetof(struct tdb_free_record, prev),
                                  off) != 0)
                        return -1;
        }
        /* head = new */
        if (tdb_write_off(tdb, b_off, off) != 0)
                return -1;

        return tdb_write_convert(tdb, off, new, sizeof(*new));
}
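
/* Note this pushes onto the front of the list (LIFO), so the most
 * recently freed record is the first one lock_and_alloc below will see. */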

/* List need not be locked. */
int add_free_record(struct tdb_context *tdb,
                    unsigned int zone_bits,
                    tdb_off_t off, tdb_len_t len_with_header)
{
        struct tdb_free_record new;
        tdb_off_t b_off;
        int ret;

        assert(len_with_header >= sizeof(new));
        assert(zone_bits < (1 << 6));

        new.magic_and_meta = TDB_FREE_MAGIC | zone_bits;
        new.data_len = len_with_header - sizeof(struct tdb_used_record);

        b_off = bucket_off(zone_off(off, zone_bits),
                           size_to_bucket(zone_bits, new.data_len));
        if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) != 0)
                return -1;

        ret = enqueue_in_free(tdb, b_off, off, &new);
        tdb_unlock_free_bucket(tdb, b_off);
        return ret;
}

/* If we have enough left over to be useful, split that off. */
static int to_used_record(struct tdb_context *tdb,
                          unsigned int zone_bits,
                          tdb_off_t off,
                          tdb_len_t needed,
                          tdb_len_t total_len,
                          tdb_len_t *actual)
{
        struct tdb_used_record used;
        tdb_len_t leftover;

        leftover = total_len - needed;
        if (leftover < sizeof(struct tdb_free_record))
                leftover = 0;

        *actual = total_len - leftover;

        if (leftover) {
                if (add_free_record(tdb, zone_bits,
                                    off + sizeof(used) + *actual,
                                    total_len - needed))
                        return -1;
        }
        return 0;
}
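
/* Worked example (illustrative): needed == 40, total_len == 100 gives
 * leftover == 60; if that is at least sizeof(struct tdb_free_record), we
 * set *actual == 40 and re-free the tail (total_len - needed == leftover
 * here, since *actual == needed).  If the leftover is too small to hold
 * a free record, it stays attached as padding and *actual == 100. */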

/* Note: we unlock the current bucket if we coalesce or fail. */
static int coalesce(struct tdb_context *tdb,
                    tdb_off_t zone_off, unsigned zone_bits,
                    tdb_off_t off, tdb_off_t b_off, tdb_len_t data_len)
{
        struct tdb_free_record pad, *r;
        tdb_off_t end = off + sizeof(struct tdb_used_record) + data_len;

        while (end < (zone_off + (1ULL << zone_bits))) {
                tdb_off_t nb_off;

                /* FIXME: do tdb_get here and below really win? */
                r = tdb_get(tdb, end, &pad, sizeof(pad));
                if (!r)
                        goto err;

                if (frec_magic(r) != TDB_FREE_MAGIC)
                        break;

                nb_off = bucket_off(zone_off,
                                    size_to_bucket(zone_bits, r->data_len));

                /* We may be violating lock order here, so best effort. */
                if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT) == -1)
                        break;

                /* Now we have lock, re-check. */
                r = tdb_get(tdb, end, &pad, sizeof(pad));
                if (!r) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        goto err;
                }

                if (unlikely(frec_magic(r) != TDB_FREE_MAGIC)) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        break;
                }

                if (remove_from_list(tdb, nb_off, r) == -1) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        goto err;
                }

                end += sizeof(struct tdb_used_record) + r->data_len;
                tdb_unlock_free_bucket(tdb, nb_off);
        }

        /* Didn't find any adjacent free? */
        if (end == off + sizeof(struct tdb_used_record) + data_len)
                return 0;

        /* OK, expand record */
        r = tdb_get(tdb, off, &pad, sizeof(pad));
        if (!r)
                goto err;

        if (r->data_len != data_len) {
                tdb->ecode = TDB_ERR_CORRUPT;
                tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                         "coalesce: expected data len %llu not %llu\n",
                         (long long)data_len, (long long)r->data_len);
                goto err;
        }

        if (remove_from_list(tdb, b_off, r) == -1)
                goto err;

        /* We have to drop this to avoid deadlocks. */
        tdb_unlock_free_bucket(tdb, b_off);

        if (add_free_record(tdb, zone_bits, off, end - off) == -1)
                return -1;
        return 1;

err:
        /* To unify error paths, we *always* unlock bucket on error. */
        tdb_unlock_free_bucket(tdb, b_off);
        return -1;
}
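
/* Locking rationale: free-bucket locks are normally taken in a fixed
 * order, but a neighbour's bucket depends on that neighbour's size, so
 * coalesce() uses TDB_LOCK_NOWAIT and simply stops merging rather than
 * risk deadlock.  Returns: 1 = merged (b_off unlocked), 0 = nothing
 * merged (b_off still held), -1 = error (b_off unlocked). */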

/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
                                tdb_off_t zone_off,
                                unsigned zone_bits,
                                tdb_off_t bucket,
                                size_t size,
                                tdb_len_t *actual)
{
        tdb_off_t off, b_off, best_off;
        struct tdb_free_record pad, best = { 0 }, *r;
        double multiplier;

again:
        b_off = bucket_off(zone_off, bucket);

        /* FIXME: Try non-blocking wait first, to measure contention.
         * If we're contended, try switching zones, and don't enlarge zone
         * next time (we want more zones). */
        /* Lock this bucket. */
        if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == -1) {
                return TDB_OFF_ERR;
        }

        best.data_len = -1ULL;
        best_off = 0;
        /* FIXME: Start with larger multiplier if we're growing. */
        multiplier = 1.0;

        /* Walk the list to see if any are large enough, getting less fussy
         * as we go. */
        off = tdb_read_off(tdb, b_off);
        if (unlikely(off == TDB_OFF_ERR))
                goto unlock_err;

        while (off) {
                /* FIXME: Does tdb_get win anything here? */
                r = tdb_get(tdb, off, &pad, sizeof(*r));
                if (!r)
                        goto unlock_err;

                if (frec_magic(r) != TDB_FREE_MAGIC) {
                        tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                                 "lock_and_alloc: %llu non-free 0x%llx\n",
                                 (long long)off, (long long)r->magic_and_meta);
                        goto unlock_err;
                }

                if (r->data_len >= size && r->data_len < best.data_len) {
                        best_off = off;
                        best = *r;
                }

                if (best.data_len < size * multiplier && best_off)
                        goto use_best;

                multiplier *= 1.01;

                /* Since we're going slow anyway, try coalescing here. */
                switch (coalesce(tdb, zone_off, zone_bits, off, b_off,
                                 r->data_len)) {
                case -1:
                        /* This has already unlocked on error. */
                        return TDB_OFF_ERR;
                case 1:
                        /* This has unlocked list, restart. */
                        goto again;
                }
                off = r->next;
        }

        /* If we found anything at all, use it. */
        if (best_off) {
        use_best:
                /* We're happy with this size: take it. */
                if (remove_from_list(tdb, b_off, &best) != 0)
                        goto unlock_err;
                tdb_unlock_free_bucket(tdb, b_off);

                if (to_used_record(tdb, zone_bits, best_off, size,
                                   best.data_len, actual)) {
                        return TDB_OFF_ERR;
                }
                return best_off;
        }

        tdb_unlock_free_bucket(tdb, b_off);
        return 0;

unlock_err:
        tdb_unlock_free_bucket(tdb, b_off);
        return TDB_OFF_ERR;
}
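
/* The multiplier heuristic above: every record walked multiplies the
 * acceptable size by 1.01, so the search grows less fussy as the list
 * gets longer.  For example, after about 70 records (1.01^70 ~= 2) we
 * would settle for a best fit wasting up to twice the requested size. */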

static bool next_zone(struct tdb_context *tdb)
{
        tdb_off_t next = tdb->zone_off + (1ULL << tdb->zhdr.zone_bits);

        /* We must have a header. */
        if (tdb->methods->oob(tdb, next + sizeof(tdb->zhdr), true))
                return false;

        tdb->zone_off = next;
        return tdb_read_convert(tdb, next, &tdb->zhdr, sizeof(tdb->zhdr)) == 0;
}

/* Offset returned is within current zone (which it may alter). */
static tdb_off_t get_free(struct tdb_context *tdb, size_t size,
                          tdb_len_t *actual)
{
        tdb_off_t start_zone = tdb->zone_off, off;
        bool wrapped = false;

        /* FIXME: If we don't get a hit in the first bucket we want,
         * try changing zones for next time.  That should help wear
         * zones evenly, so we don't need to search all of them before
         * expanding. */
        while (!wrapped || tdb->zone_off != start_zone) {
                tdb_off_t b;

                /* Shortcut for really huge allocations: this zone can't
                 * hold it, so move on (a bare "continue" here would spin
                 * forever without ever advancing the zone). */
                if ((size >> tdb->zhdr.zone_bits) != 0)
                        goto next;

                /* Start at exact size bucket, and search up... */
                b = size_to_bucket(tdb->zhdr.zone_bits, size);
                for (b = find_free_head(tdb, b);
                     b <= BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits);
                     b += find_free_head(tdb, b + 1)) {
                        /* Try getting one from list. */
                        off = lock_and_alloc(tdb, tdb->zone_off,
                                             tdb->zhdr.zone_bits,
                                             b, size, actual);
                        if (off == TDB_OFF_ERR)
                                return TDB_OFF_ERR;
                        if (off != 0)
                                return off;
                        /* Didn't work.  Try next bucket. */
                }

        next:
                /* Didn't work, try next zone, if it exists. */
                if (!next_zone(tdb)) {
                        wrapped = true;
                        tdb->zone_off = sizeof(struct tdb_header);
                        if (tdb_read_convert(tdb, tdb->zone_off,
                                             &tdb->zhdr, sizeof(tdb->zhdr))) {
                                return TDB_OFF_ERR;
                        }
                }
        }
        return 0;
}

int set_header(struct tdb_context *tdb,
               struct tdb_used_record *rec,
               uint64_t keylen, uint64_t datalen,
               uint64_t actuallen, uint64_t hash,
               unsigned int zone_bits)
{
        uint64_t keybits = (fls64(keylen) + 1) / 2;

        /* Use bottom bits of hash, so it's independent of hash table size. */
        rec->magic_and_meta
                = zone_bits
                | ((hash & ((1 << 5)-1)) << 6)
                | ((actuallen - (keylen + datalen)) << 11)
                | (keybits << 43)
                | (TDB_MAGIC << 48);
        rec->key_and_data_len = (keylen | (datalen << (keybits*2)));

        /* Encoding can fail on big values. */
        if (rec_key_length(rec) != keylen
            || rec_data_length(rec) != datalen
            || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
                tdb->ecode = TDB_ERR_IO;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "Could not encode k=%llu,d=%llu,a=%llu\n",
                         (long long)keylen, (long long)datalen,
                         (long long)actuallen);
                return -1;
        }
        return 0;
}
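
/* Layout of magic_and_meta, as implied by the shifts above (bit 0 least
 * significant):
 *
 *   bits  0-5   zone_bits
 *   bits  6-10  bottom 5 bits of the hash
 *   bits 11-42  extra padding (actuallen - keylen - datalen)
 *   bits 43-47  keybits (the key length occupies 2*keybits bits)
 *   bits 48-63  TDB_MAGIC
 *
 * key_and_data_len packs keylen into the low 2*keybits bits with datalen
 * above it, which is why oversized values can fail the round-trip check. */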

static bool zones_happy(struct tdb_context *tdb)
{
        /* FIXME: look at distribution of zones. */
        return true;
}

/* Assume we want buckets up to the comfort factor. */
static tdb_len_t overhead(unsigned int zone_bits)
{
        return sizeof(struct free_zone_header)
                + (BUCKETS_FOR_ZONE(zone_bits) + 1) * sizeof(tdb_off_t);
}

/* Expand the database (by adding a zone). */
static int tdb_expand(struct tdb_context *tdb, tdb_len_t size)
{
        uint64_t old_size;
        tdb_off_t off;
        uint8_t zone_bits;
        unsigned int num_buckets;
        tdb_len_t wanted;
        struct free_zone_header zhdr;
        bool enlarge_zone;

        /* We need room for the record header too. */
        wanted = sizeof(struct tdb_used_record) + size;

        /* Only one person can expand file at a time. */
        if (tdb_lock_expand(tdb, F_WRLCK) != 0)
                return -1;

        /* Someone else may have expanded the file, so retry. */
        old_size = tdb->map_size;
        tdb->methods->oob(tdb, tdb->map_size + 1, true);
        if (tdb->map_size != old_size)
                goto success;

        /* FIXME: Tailer is a bogus optimization, remove it. */
        /* zone bits tailer char is protected by EXPAND lock. */
        if (tdb->methods->read(tdb, old_size - 1, &zone_bits, 1) == -1)
                goto fail;

        /* If zones aren't working well, add larger zone if possible. */
        enlarge_zone = !zones_happy(tdb);

        /* New zone can reuse the old zone_bits, or be larger if we're on
         * the right boundary. */
        for (;;) {
                /* Does this fit the allocation comfortably? */
                if ((1ULL << zone_bits) >= overhead(zone_bits) + wanted) {
                        /* Only let enlarge_zone enlarge us once. */
                        if (!enlarge_zone)
                                break;
                        enlarge_zone = false;
                }
                if ((old_size - 1 - sizeof(struct tdb_header))
                    & (1ULL << zone_bits))
                        break;
                zone_bits++;
        }

        zhdr.zone_bits = zone_bits;
        num_buckets = BUCKETS_FOR_ZONE(zone_bits);

        /* FIXME: I don't think we need to expand to full zone, do we? */
        if (tdb->methods->expand_file(tdb, 1ULL << zone_bits) == -1)
                goto fail;

        /* Write new tailer. */
        if (tdb->methods->write(tdb, tdb->map_size - 1, &zone_bits, 1) == -1)
                goto fail;

        /* Write new zone header (where the old tailer was). */
        off = old_size - 1;
        if (tdb_write_convert(tdb, off, &zhdr, sizeof(zhdr)) == -1)
                goto fail;

        /* Now write empty buckets. */
        off += sizeof(zhdr);
        if (zero_out(tdb, off, (num_buckets+1) * sizeof(tdb_off_t)) == -1)
                goto fail;
        off += (num_buckets+1) * sizeof(tdb_off_t);

        /* Now add the rest as our free record. */
        if (add_free_record(tdb, zone_bits, off, tdb->map_size-1-off) == -1)
                goto fail;

        /* Try allocating from this zone now. */
        tdb->zone_off = old_size - 1;
        tdb->zhdr = zhdr;

success:
        tdb_unlock_expand(tdb, F_WRLCK);
        return 0;

fail:
        tdb_unlock_expand(tdb, F_WRLCK);
        return -1;
}
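
/* Resulting layout of the appended zone (the zone header overwrites the
 * old 1-byte tailer, and a fresh tailer lands at the new end of file):
 *
 *   [free_zone_header][num_buckets+1 bucket words][one big free record]
 *   ... [1-byte tailer holding zone_bits]
 */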

static tdb_len_t adjust_size(size_t keylen, size_t datalen, bool growing)
{
        tdb_len_t size = keylen + datalen;

        if (size < TDB_MIN_DATA_LEN)
                size = TDB_MIN_DATA_LEN;

        /* Overallocate if this is coming from an enlarging store. */
        if (growing)
                size += datalen / 2;

        /* Round to next uint64_t boundary. */
        return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}
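
/* Example (illustrative, assuming TDB_MIN_DATA_LEN is below these
 * values): keylen 5, datalen 10 gives size 15, rounded up to 16; with
 * growing set it becomes 15 + 10/2 = 20, rounded up to 24. */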

/* This won't fail: it will expand the database if it has to. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
                uint64_t hash, bool growing)
{
        tdb_off_t off;
        tdb_len_t size, actual;
        struct tdb_used_record rec;

        /* We can't hold pointers during this: we could unmap! */
        assert(!tdb->direct_access);

        size = adjust_size(keylen, datalen, growing);

again:
        off = get_free(tdb, size, &actual);
        if (unlikely(off == TDB_OFF_ERR))
                return off;

        if (unlikely(off == 0)) {
                if (tdb_expand(tdb, size) == -1)
                        return TDB_OFF_ERR;
                goto again;
        }

        /* Some supergiant values can't be encoded. */
        /* FIXME: Check before, and limit actual in get_free. */
        if (set_header(tdb, &rec, keylen, datalen, actual, hash,
                       tdb->zhdr.zone_bits) != 0) {
                add_free_record(tdb, tdb->zhdr.zone_bits, off,
                                sizeof(rec) + actual);
                return TDB_OFF_ERR;
        }

        if (tdb_write_convert(tdb, off, &rec, sizeof(rec)) != 0)
                return TDB_OFF_ERR;

        return off;
}