#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include <stdlib.h>
#include "alloc.h"
#include "bitops.h"
#include "tiny.h"
#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>
#include <ccan/alignof/alignof.h>
#include <ccan/short_types/short_types.h>
#include <ccan/compiler/compiler.h>
#include "config.h"

/*
   Inspired by (and parts taken from) Andrew Tridgell's alloc_mmap:
   http://samba.org/~tridge/junkcode/alloc_mmap/

   Copyright (C) Andrew Tridgell 2007

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/* We divide the pool into this many large pages (nearest power of 2) */
#define MAX_LARGE_PAGES (256UL)

/* 32 small pages == 1 large page. */
#define BITS_FROM_SMALL_TO_LARGE_PAGE 5

#define MAX_SMALL_PAGES (MAX_LARGE_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)

/* Smallest pool size for this scheme: 128-byte small pages.  That's
 * 9%/13% overhead for 32/64 bit. */
#define MIN_USEFUL_SIZE (MAX_SMALL_PAGES * 128)
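
/*
 * Worked numbers for the constants above (a sketch, nothing new):
 * MAX_SMALL_PAGES == 256 << 5 == 8192, so MIN_USEFUL_SIZE == 8192 * 128
 * == 1MB.  Pools smaller than that fall through to the tiny allocator
 * (see alloc_init below).
 */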

/* Every 4 buckets, we jump up a power of 2. ...8 10 12 14 16 20 24 28 32... */
#define INTER_BUCKET_SPACE 4

#define SMALL_PAGES_PER_LARGE_PAGE (1 << BITS_FROM_SMALL_TO_LARGE_PAGE)

/* FIXME: Figure this out properly. */
#define MAX_SIZE (1 << 30)

/* How few objects can fit in a page before we use a larger one? (8) */
#define MAX_PAGE_OBJECT_ORDER   3

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

struct bucket_state {
        u32 elements_per_page;
        u16 page_list;
        u16 full_list;
};

struct header {
        /* Bitmap of which pages are large. */
        unsigned long pagesize[MAX_LARGE_PAGES / BITS_PER_LONG];

        /* List of unused small/large pages. */
        u16 small_free_list;
        u16 large_free_list;

        /* List of huge allocs. */
        unsigned long huge;

        /* This is variable-length: there are INTER_BUCKET_SPACE (4)
         * buckets for each power of 2. */
        struct bucket_state bs[1];
};

struct huge_alloc {
        unsigned long next, prev;
        unsigned long off, len;
};

struct page_header {
        u16 next, prev;
        /* FIXME: We can just count all-0 and all-1 used[] elements. */
        unsigned elements_used : 25;
        unsigned bucket : 7;
        unsigned long used[1]; /* One bit per element. */
};

/*
 * Every 4 buckets, the size doubles.
 * Between buckets, sizes increase linearly.
 *
 * eg. bucket 40 = 2^10                 = 1024
 *     bucket 41 = 2^10 + 1*2^10/4      = 1024 + 256
 *     bucket 42 = 2^10 + 2*2^10/4      = 1024 + 512
 *     bucket 43 = 2^10 + 3*2^10/4      = 1024 + 768
 *     bucket 44 = 2^11                 = 2048
 *
 * Care is taken to handle low-numbered buckets, at the cost of
 * overflow for very large ones.
 */
static unsigned long bucket_to_size(unsigned int bucket)
{
        unsigned long base = 1UL << (bucket / INTER_BUCKET_SPACE);
        return base + ((bucket % INTER_BUCKET_SPACE)
                       << (bucket / INTER_BUCKET_SPACE))
                / INTER_BUCKET_SPACE;
}
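
/*
 * Worked low-bucket example (a sketch; integer division makes the
 * smallest buckets coincide): for buckets 0-3, base == 1 and the
 * fractional term rounds down to 0, so all four map to size 1.
 * Buckets 4-7 give 2, 2, 3, 3; from bucket 8 on (base >= 4) every
 * bucket gets a distinct size: 4, 5, 6, 7, 8, ...
 */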

/*
 * Say size is 10.
 *   fls(size/2) == 3.  1 << 3 == 8, so we're 2 too large, out of a possible
 * 8 too large.  That's 1/4 of the way to the next power of 2 == 1 bucket.
 *
 * We make sure we round up.  Note that this fails on 32 bit at size
 * 1879048193 (around bucket 120).
 */
static unsigned int size_to_bucket(unsigned long size)
{
        unsigned int base = fls(size/2);
        unsigned long overshoot;

        overshoot = size - (1UL << base);
        return base * INTER_BUCKET_SPACE
                + ((overshoot * INTER_BUCKET_SPACE + (1UL << base)-1) >> base);
}
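
/*
 * Round trip for the example above (a sketch): size 10 gives base == 3,
 * overshoot == 2, so bucket == 3*4 + ((2*4 + 7) >> 3) == 13, and
 * bucket_to_size(13) == 8 + (1 << 3)/4 == 10 again.
 */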

static unsigned int small_page_bits(unsigned long poolsize)
{
        return fls(poolsize / MAX_SMALL_PAGES - 1);
}
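
/*
 * eg. a 1MB pool (the minimum): 1048576 / 8192 - 1 == 127, fls(127) == 7,
 * so small pages are 128 bytes and large pages 128 << 5 == 4096 bytes.
 */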

static struct page_header *from_pgnum(struct header *head,
                                      unsigned long pgnum,
                                      unsigned sp_bits)
{
        return (struct page_header *)((char *)head + (pgnum << sp_bits));
}

static u16 to_pgnum(struct header *head, void *p, unsigned sp_bits)
{
        return ((char *)p - (char *)head) >> sp_bits;
}

static size_t used_size(unsigned int num_elements)
{
        return align_up(num_elements, BITS_PER_LONG) / CHAR_BIT;
}

/*
 * We always align the first entry to the lower power of 2.
 * eg. the 12-byte bucket gets 8-byte aligned.  The 4096-byte bucket
 * gets 4096-byte aligned.
 */
static unsigned long page_header_size(unsigned int align_bits,
                                      unsigned long num_elements)
{
        unsigned long size;

        size = sizeof(struct page_header)
                - sizeof(((struct page_header *)0)->used)
                + used_size(num_elements);
        return align_up(size, 1UL << align_bits);
}
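
/*
 * Worked example (a sketch, assuming a typical LP64 ABI where the fixed
 * part of struct page_header is 8 bytes): for the 16-byte bucket with 7
 * elements, used_size(7) == align_up(7, 64)/8 == 8 bytes of bitmap, so
 * page_header_size(4, 7) == align_up(8 + 8, 16) == 16 bytes.
 */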

static void add_to_list(struct header *head,
                        u16 *list, struct page_header *ph, unsigned sp_bits)
{
        unsigned long h = *list, offset = to_pgnum(head, ph, sp_bits);

        ph->next = h;
        if (h) {
                struct page_header *prev = from_pgnum(head, h, sp_bits);
                assert(prev->prev == 0);
                prev->prev = offset;
        }
        *list = offset;
        ph->prev = 0;
}

static void del_from_list(struct header *head,
                          u16 *list, struct page_header *ph, unsigned sp_bits)
{
        /* Front of list? */
        if (ph->prev == 0) {
                *list = ph->next;
        } else {
                struct page_header *prev = from_pgnum(head, ph->prev, sp_bits);
                prev->next = ph->next;
        }
        if (ph->next != 0) {
                struct page_header *next = from_pgnum(head, ph->next, sp_bits);
                next->prev = ph->prev;
        }
}

static u16 pop_from_list(struct header *head,
                         u16 *list,
                         unsigned int sp_bits)
{
        u16 h = *list;
        struct page_header *ph = from_pgnum(head, h, sp_bits);

        if (likely(h)) {
                *list = ph->next;
                if (*list)
                        from_pgnum(head, *list, sp_bits)->prev = 0;
        }
        return h;
}

static void add_to_huge_list(struct header *head, struct huge_alloc *ha)
{
        unsigned long h = head->huge;
        unsigned long offset = (char *)ha - (char *)head;

        ha->next = h;
        if (h) {
                struct huge_alloc *prev = (void *)((char *)head + h);
                assert(prev->prev == 0);
                prev->prev = offset;
        }
        head->huge = offset;
        ha->prev = 0;
}

static void del_from_huge(struct header *head, struct huge_alloc *ha)
{
        /* Front of list? */
        if (ha->prev == 0) {
                head->huge = ha->next;
        } else {
                struct huge_alloc *prev = (void *)((char *)head + ha->prev);
                prev->next = ha->next;
        }
        if (ha->next != 0) {
                struct huge_alloc *next = (void *)((char *)head + ha->next);
                next->prev = ha->prev;
        }
}

static void add_small_page_to_freelist(struct header *head,
                                       struct page_header *ph,
                                       unsigned int sp_bits)
{
        add_to_list(head, &head->small_free_list, ph, sp_bits);
}

static void add_large_page_to_freelist(struct header *head,
                                       struct page_header *ph,
                                       unsigned int sp_bits)
{
        add_to_list(head, &head->large_free_list, ph, sp_bits);
}

static void add_to_bucket_list(struct header *head,
                               struct bucket_state *bs,
                               struct page_header *ph,
                               unsigned int sp_bits)
{
        add_to_list(head, &bs->page_list, ph, sp_bits);
}

static void del_from_bucket_list(struct header *head,
                                 struct bucket_state *bs,
                                 struct page_header *ph,
                                 unsigned int sp_bits)
{
        del_from_list(head, &bs->page_list, ph, sp_bits);
}

static void del_from_bucket_full_list(struct header *head,
                                      struct bucket_state *bs,
                                      struct page_header *ph,
                                      unsigned int sp_bits)
{
        del_from_list(head, &bs->full_list, ph, sp_bits);
}

static void add_to_bucket_full_list(struct header *head,
                                    struct bucket_state *bs,
                                    struct page_header *ph,
                                    unsigned int sp_bits)
{
        add_to_list(head, &bs->full_list, ph, sp_bits);
}

static void clear_bit(unsigned long bitmap[], unsigned int off)
{
        bitmap[off / BITS_PER_LONG] &= ~(1UL << (off % BITS_PER_LONG));
}

static bool test_bit(const unsigned long bitmap[], unsigned int off)
{
        return bitmap[off / BITS_PER_LONG] & (1UL << (off % BITS_PER_LONG));
}

static void set_bit(unsigned long bitmap[], unsigned int off)
{
        bitmap[off / BITS_PER_LONG] |= (1UL << (off % BITS_PER_LONG));
}

/* There must be a bit to be found. */
static unsigned int find_free_bit(const unsigned long bitmap[])
{
        unsigned int i;

        for (i = 0; bitmap[i] == -1UL; i++);
        return (i*BITS_PER_LONG) + ffsl(~bitmap[i]) - 1;
}
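
/*
 * eg. (64-bit longs) with bitmap[0] == -1UL and bitmap[1] == 0x7, the
 * scan stops at i == 1 and ffsl(~0x7) == 4, so the function returns
 * 64 + 4 - 1 == 67: the first clear bit.
 */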

/* How many elements can we fit in a page? */
static unsigned long elements_per_page(unsigned long align_bits,
                                       unsigned long esize,
                                       unsigned long psize)
{
        unsigned long num, overhead;

        /* First approximation: no extra room for bitmap. */
        overhead = align_up(sizeof(struct page_header), 1UL << align_bits);
        num = (psize - overhead) / esize;

        while (page_header_size(align_bits, num) + esize * num > psize)
                num--;
        return num;
}
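
/*
 * Continuing the sketch above: elements_per_page(4, 16, 128) first
 * guesses (128 - 16)/16 == 7, and page_header_size(4, 7) + 7*16 == 128
 * fits exactly, so a 128-byte small page holds 7 16-byte elements.
 */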

static bool large_page_bucket(unsigned int bucket, unsigned int sp_bits)
{
        unsigned long max_smallsize;

        /* Note: this doesn't take into account page header. */
        max_smallsize = (1UL << sp_bits) >> MAX_PAGE_OBJECT_ORDER;

        return bucket_to_size(bucket) > max_smallsize;
}
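
/*
 * eg. with 128-byte small pages (sp_bits == 7), max_smallsize == 16:
 * any bucket over 16 bytes would fit fewer than 2^MAX_PAGE_OBJECT_ORDER
 * == 8 objects on a small page, so it is given large pages instead.
 */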

static unsigned int max_bucket(unsigned int lp_bits)
{
        return (lp_bits - MAX_PAGE_OBJECT_ORDER) * INTER_BUCKET_SPACE;
}

void alloc_init(void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        struct page_header *ph;
        unsigned int lp_bits, sp_bits, num_buckets;
        unsigned long header_size, i;

        if (poolsize < MIN_USEFUL_SIZE) {
                tiny_alloc_init(pool, poolsize);
                return;
        }

        /* We rely on page numbers fitting in 16 bits. */
        BUILD_ASSERT(MAX_SMALL_PAGES < 65536);

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);

        head = pool;
        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        memset(head, 0, header_size);
        for (i = 0; i < num_buckets; i++) {
                unsigned long pagesize;

                if (large_page_bucket(i, sp_bits))
                        pagesize = 1UL << lp_bits;
                else
                        pagesize = 1UL << sp_bits;

                head->bs[i].elements_per_page
                        = elements_per_page(i / INTER_BUCKET_SPACE,
                                            bucket_to_size(i),
                                            pagesize);
        }

        /* They start as all large pages. */
        memset(head->pagesize, 0xFF, sizeof(head->pagesize));
        /* FIXME: small pages for last bit? */

        /* Split first page into small pages. */
        assert(header_size < (1UL << lp_bits));
        clear_bit(head->pagesize, 0);

        /* Skip over page(s) used by header, add rest to free list */
        for (i = align_up(header_size, (1UL << sp_bits)) >> sp_bits;
             i < SMALL_PAGES_PER_LARGE_PAGE;
             i++) {
                ph = from_pgnum(head, i, sp_bits);
                ph->elements_used = 0;
                add_small_page_to_freelist(head, ph, sp_bits);
        }

        /* Add the rest of the pages as large pages. */
        i = SMALL_PAGES_PER_LARGE_PAGE;
        while ((i << sp_bits) + (1UL << lp_bits) <= poolsize) {
                assert(i < MAX_SMALL_PAGES);
                ph = from_pgnum(head, i, sp_bits);
                ph->elements_used = 0;
                add_large_page_to_freelist(head, ph, sp_bits);
                i += SMALL_PAGES_PER_LARGE_PAGE;
        }
}
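
/*
 * Example usage (a sketch, not part of this file): any memory region
 * works as a pool; below MIN_USEFUL_SIZE the tiny allocator is used.
 *
 *      char *pool = malloc(1 << 20);
 *      alloc_init(pool, 1 << 20);
 *      void *p = alloc_get(pool, 1 << 20, 100, 16);
 *      ...
 *      alloc_free(pool, 1 << 20, p);
 */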

/* A large page's worth of small pages is free: delete them from free list. */
static void del_large_from_small_free_list(struct header *head,
                                           struct page_header *ph,
                                           unsigned int sp_bits)
{
        unsigned long i;

        for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                del_from_list(head, &head->small_free_list,
                              (void *)ph + (i << sp_bits),
                              sp_bits);
        }
}

static bool all_empty(struct header *head,
                      unsigned long pgnum,
                      unsigned sp_bits)
{
        unsigned long i;

        for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                struct page_header *ph = from_pgnum(head, pgnum + i, sp_bits);
                if (ph->elements_used)
                        return false;
        }
        return true;
}

static void recombine_small_pages(struct header *head, unsigned long poolsize,
                                  unsigned int sp_bits)
{
        unsigned long i;
        unsigned int lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        /* Look for small pages to coalesce, after first large page. */
        for (i = SMALL_PAGES_PER_LARGE_PAGE;
             i < (poolsize >> lp_bits) << BITS_FROM_SMALL_TO_LARGE_PAGE;
             i += SMALL_PAGES_PER_LARGE_PAGE) {
                /* Already a large page? */
                if (test_bit(head->pagesize, i / SMALL_PAGES_PER_LARGE_PAGE))
                        continue;
                if (all_empty(head, i, sp_bits)) {
                        struct page_header *ph = from_pgnum(head, i, sp_bits);
                        set_bit(head->pagesize,
                                i / SMALL_PAGES_PER_LARGE_PAGE);
                        del_large_from_small_free_list(head, ph, sp_bits);
                        add_large_page_to_freelist(head, ph, sp_bits);
                }
        }
}

static u16 get_large_page(struct header *head, unsigned long poolsize,
                          unsigned int sp_bits)
{
        unsigned int lp_bits, page;

        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        page = pop_from_list(head, &head->large_free_list, sp_bits);
        if (likely(page))
                return page;

        recombine_small_pages(head, poolsize, sp_bits);

        return pop_from_list(head, &head->large_free_list, sp_bits);
}

/* Returns small page. */
static unsigned long break_up_large_page(struct header *head,
                                         unsigned int sp_bits,
                                         u16 lpage)
{
        unsigned int i;

        clear_bit(head->pagesize, lpage >> BITS_FROM_SMALL_TO_LARGE_PAGE);

        for (i = 1; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                struct page_header *ph = from_pgnum(head, lpage + i, sp_bits);
                /* Initialize this: huge_alloc reads it. */
                ph->elements_used = 0;
                add_small_page_to_freelist(head, ph, sp_bits);
        }

        return lpage;
}

static u16 get_small_page(struct header *head, unsigned long poolsize,
                          unsigned int sp_bits)
{
        u16 ret;

        ret = pop_from_list(head, &head->small_free_list, sp_bits);
        if (likely(ret))
                return ret;
        ret = get_large_page(head, poolsize, sp_bits);
        if (likely(ret))
                ret = break_up_large_page(head, sp_bits, ret);
        return ret;
}

static bool huge_allocated(struct header *head, unsigned long offset)
{
        unsigned long i;
        struct huge_alloc *ha;

        for (i = head->huge; i; i = ha->next) {
                ha = (void *)((char *)head + i);
                if (ha->off <= offset && ha->off + ha->len > offset)
                        return true;
        }
        return false;
}

/* They want something really big.  Aim for contiguous pages (slow). */
static COLD void *huge_alloc(void *pool, unsigned long poolsize,
                             unsigned long size, unsigned long align)
{
        struct header *head = pool;
        struct huge_alloc *ha;
        unsigned long i, sp_bits, lp_bits, num, header_size;

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        /* Allocate tracking structure optimistically. */
        ha = alloc_get(pool, poolsize, sizeof(*ha), ALIGNOF(*ha));
        if (!ha)
                return NULL;

        /* First search for contiguous small pages... */
        header_size = sizeof(*head) + sizeof(head->bs) * (max_bucket(lp_bits)-1);

        num = 0;
        for (i = (header_size + (1UL << sp_bits) - 1) >> sp_bits;
             i << sp_bits < poolsize;
             i++) {
                struct page_header *pg;
                unsigned long off = (i << sp_bits);

                /* Skip over large pages. */
                if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                        i += (1UL << BITS_FROM_SMALL_TO_LARGE_PAGE)-1;
                        continue;
                }

                /* Does this page meet alignment requirements? */
                if (!num && off % align != 0)
                        continue;

                /* FIXME: This makes us O(n^2). */
                if (huge_allocated(head, off)) {
                        num = 0;
                        continue;
                }

                pg = (struct page_header *)((char *)head + off);
                if (pg->elements_used) {
                        num = 0;
                        continue;
                }

                num++;
                if (num << sp_bits >= size) {
                        unsigned long pgnum;

                        /* Remove from free list. */
                        for (pgnum = i; pgnum > i - num; pgnum--) {
                                pg = from_pgnum(head, pgnum, sp_bits);
                                del_from_list(head,
                                              &head->small_free_list,
                                              pg, sp_bits);
                        }
                        ha->off = (i - num + 1) << sp_bits;
                        ha->len = num << sp_bits;
                        goto done;
                }
        }

        /* Now search for large pages... */
        recombine_small_pages(head, poolsize, sp_bits);

        num = 0;
        for (i = (header_size + (1UL << lp_bits) - 1) >> lp_bits;
             (i << lp_bits) < poolsize; i++) {
                struct page_header *pg;
                unsigned long off = (i << lp_bits);

                /* Ignore small pages. */
                if (!test_bit(head->pagesize, i))
                        continue;

                /* Does this page meet alignment requirements? */
                if (!num && off % align != 0)
                        continue;

                /* FIXME: This makes us O(n^2). */
                if (huge_allocated(head, off)) {
                        num = 0;
                        continue;
                }

                pg = (struct page_header *)((char *)head + off);
                if (pg->elements_used) {
                        num = 0;
                        continue;
                }

                num++;
                if (num << lp_bits >= size) {
                        unsigned long pgnum;

                        /* Remove from free list. */
                        for (pgnum = i; pgnum > i - num; pgnum--) {
                                pg = from_pgnum(head, pgnum, lp_bits);
                                del_from_list(head,
                                              &head->large_free_list,
                                              pg, sp_bits);
                        }
                        ha->off = (i - num + 1) << lp_bits;
                        ha->len = num << lp_bits;
                        goto done;
                }
        }

        /* Unable to satisfy: free huge alloc structure. */
        alloc_free(pool, poolsize, ha);
        return NULL;

done:
        add_to_huge_list(pool, ha);
        return (char *)pool + ha->off;
}

static COLD void
huge_free(struct header *head, unsigned long poolsize, void *free)
{
        unsigned long i, off, pgnum, free_off = (char *)free - (char *)head;
        unsigned int sp_bits, lp_bits;
        struct huge_alloc *ha;

        for (i = head->huge; i; i = ha->next) {
                ha = (void *)((char *)head + i);
                if (free_off == ha->off)
                        break;
        }
        assert(i);

        /* Free up all the pages, delete and free ha */
        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
        pgnum = free_off >> sp_bits;

        if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                for (off = ha->off;
                     off < ha->off + ha->len;
                     off += 1UL << lp_bits) {
                        add_large_page_to_freelist(head,
                                                   (void *)((char *)head + off),
                                                   sp_bits);
                }
        } else {
                for (off = ha->off;
                     off < ha->off + ha->len;
                     off += 1UL << sp_bits) {
                        add_small_page_to_freelist(head,
                                                   (void *)((char *)head + off),
                                                   sp_bits);
                }
        }
        del_from_huge(head, ha);
        alloc_free(head, poolsize, ha);
}

static COLD unsigned long huge_size(struct header *head, void *p)
{
        unsigned long i, off = (char *)p - (char *)head;
        struct huge_alloc *ha;

        for (i = head->huge; i; i = ha->next) {
                ha = (void *)((char *)head + i);
                if (off == ha->off) {
                        return ha->len;
                }
        }
        abort();
}

void *alloc_get(void *pool, unsigned long poolsize,
                unsigned long size, unsigned long align)
{
        struct header *head = pool;
        unsigned int bucket;
        unsigned long i;
        struct bucket_state *bs;
        struct page_header *ph;
        unsigned int sp_bits;

        if (poolsize < MIN_USEFUL_SIZE) {
                return tiny_alloc_get(pool, poolsize, size, align);
        }

        size = align_up(size, align);
        if (unlikely(!size))
                size = 1;
        bucket = size_to_bucket(size);

        sp_bits = small_page_bits(poolsize);

        if (bucket >= max_bucket(sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                return huge_alloc(pool, poolsize, size, align);
        }

        bs = &head->bs[bucket];

        if (!bs->page_list) {
                struct page_header *ph;

                if (large_page_bucket(bucket, sp_bits))
                        bs->page_list = get_large_page(head, poolsize,
                                                       sp_bits);
                else
                        bs->page_list = get_small_page(head, poolsize,
                                                       sp_bits);
                /* FIXME: Try large-aligned alloc?  Header stuffing? */
                if (unlikely(!bs->page_list))
                        return NULL;
                ph = from_pgnum(head, bs->page_list, sp_bits);
                ph->bucket = bucket;
                ph->elements_used = 0;
                ph->next = 0;
                memset(ph->used, 0, used_size(bs->elements_per_page));
        }

        ph = from_pgnum(head, bs->page_list, sp_bits);

        i = find_free_bit(ph->used);
        set_bit(ph->used, i);
        ph->elements_used++;

        /* check if this page is now full */
        if (unlikely(ph->elements_used == bs->elements_per_page)) {
                del_from_bucket_list(head, bs, ph, sp_bits);
                add_to_bucket_full_list(head, bs, ph, sp_bits);
        }

        return (char *)ph + page_header_size(ph->bucket / INTER_BUCKET_SPACE,
                                             bs->elements_per_page)
               + i * bucket_to_size(bucket);
}
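
/*
 * Address arithmetic for the return above (a sketch, reusing the
 * 128-byte page layout from earlier): with a 16-byte header and the
 * 16-byte bucket, element i == 3 lands at page start + 16 + 3*16,
 * i.e. offset 64 into the page.
 */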

void alloc_free(void *pool, unsigned long poolsize, void *free)
{
        struct header *head = pool;
        struct bucket_state *bs;
        unsigned int sp_bits;
        unsigned long i, pgnum, pgoffset, offset = (char *)free - (char *)pool;
        bool smallpage;
        struct page_header *ph;

        if (poolsize < MIN_USEFUL_SIZE) {
                return tiny_alloc_free(pool, poolsize, free);
        }

        /* Get page header. */
        sp_bits = small_page_bits(poolsize);
        pgnum = offset >> sp_bits;

        /* Big page? Round down further. */
        if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                smallpage = false;
                pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);
        } else
                smallpage = true;

        /* Step back to page header. */
        ph = from_pgnum(head, pgnum, sp_bits);
        if ((void *)ph == free) {
                huge_free(head, poolsize, free);
                return;
        }

        bs = &head->bs[ph->bucket];
        pgoffset = offset - (pgnum << sp_bits)
                - page_header_size(ph->bucket / INTER_BUCKET_SPACE,
                                   bs->elements_per_page);

        if (unlikely(ph->elements_used == bs->elements_per_page)) {
                del_from_bucket_full_list(head, bs, ph, sp_bits);
                add_to_bucket_list(head, bs, ph, sp_bits);
        }

        /* Which element are we? */
        i = pgoffset / bucket_to_size(ph->bucket);
        clear_bit(ph->used, i);
        ph->elements_used--;

        if (unlikely(ph->elements_used == 0)) {
                bs = &head->bs[ph->bucket];
                del_from_bucket_list(head, bs, ph, sp_bits);
                if (smallpage)
                        add_small_page_to_freelist(head, ph, sp_bits);
                else
                        add_large_page_to_freelist(head, ph, sp_bits);
        }
}

unsigned long alloc_size(void *pool, unsigned long poolsize, void *p)
{
        struct header *head = pool;
        unsigned int pgnum, sp_bits;
        unsigned long offset = (char *)p - (char *)pool;
        struct page_header *ph;

        if (poolsize < MIN_USEFUL_SIZE)
                return tiny_alloc_size(pool, poolsize, p);

        /* Get page header. */
        sp_bits = small_page_bits(poolsize);
        pgnum = offset >> sp_bits;

        /* Big page? Round down further. */
        if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);

        /* Step back to page header. */
        ph = from_pgnum(head, pgnum, sp_bits);
        if ((void *)ph == p)
                return huge_size(head, p);

        return bucket_to_size(ph->bucket);
}

/* Useful for gdb breakpoints. */
static bool check_fail(void)
{
        return false;
}

static unsigned long count_bits(const unsigned long bitmap[],
                                unsigned long limit)
{
        unsigned long i, count = 0;

        while (limit >= BITS_PER_LONG) {
                count += popcount(bitmap[0]);
                bitmap++;
                limit -= BITS_PER_LONG;
        }

        for (i = 0; i < limit; i++)
                if (test_bit(bitmap, i))
                        count++;
        return count;
}

static bool out_of_bounds(unsigned long pgnum,
                          unsigned int sp_bits,
                          unsigned long pagesize,
                          unsigned long poolsize)
{
        if (((pgnum << sp_bits) >> sp_bits) != pgnum)
                return true;

        if ((pgnum << sp_bits) > poolsize)
                return true;

        return ((pgnum << sp_bits) + pagesize > poolsize);
}

static bool check_bucket(struct header *head,
                         unsigned long poolsize,
                         unsigned long pages[],
                         struct bucket_state *bs,
                         unsigned int bindex)
{
        bool lp_bucket;
        struct page_header *ph;
        unsigned long taken, i, prev, pagesize, sp_bits, lp_bits;

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        lp_bucket = large_page_bucket(bindex, sp_bits);

        pagesize = 1UL << (lp_bucket ? lp_bits : sp_bits);

        /* This many elements fit? */
        taken = page_header_size(bindex / INTER_BUCKET_SPACE,
                                 bs->elements_per_page);
        taken += bucket_to_size(bindex) * bs->elements_per_page;
        if (taken > pagesize)
                return check_fail();

        /* One more wouldn't fit? */
        taken = page_header_size(bindex / INTER_BUCKET_SPACE,
                                 bs->elements_per_page + 1);
        taken += bucket_to_size(bindex) * (bs->elements_per_page + 1);
        if (taken <= pagesize)
                return check_fail();

        /* Walk used list. */
        prev = 0;
        for (i = bs->page_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, pagesize, poolsize))
                        return check_fail();
                /* Wrong size page? */
                if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
                    != lp_bucket)
                        return check_fail();
                /* Large page not on boundary? */
                if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                /* Empty or full? */
                if (ph->elements_used == 0)
                        return check_fail();
                if (ph->elements_used >= bs->elements_per_page)
                        return check_fail();
                /* Used bits don't agree? */
                if (ph->elements_used != count_bits(ph->used,
                                                    bs->elements_per_page))
                        return check_fail();
                /* Wrong bucket? */
                if (ph->bucket != bindex)
                        return check_fail();
                prev = i;
        }

        /* Walk full list. */
        prev = 0;
        for (i = bs->full_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, pagesize, poolsize))
                        return check_fail();
                /* Wrong size page? */
                if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
                    != lp_bucket)
                        return check_fail();
                /* Large page not on boundary? */
                if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                /* Not full? */
                if (ph->elements_used != bs->elements_per_page)
                        return check_fail();
                /* Used bits don't agree? */
                if (ph->elements_used != count_bits(ph->used,
                                                    bs->elements_per_page))
                        return check_fail();
                /* Wrong bucket? */
                if (ph->bucket != bindex)
                        return check_fail();
                prev = i;
        }
        return true;
}

bool alloc_check(void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        unsigned long prev, i, lp_bits, sp_bits, header_size, num_buckets;
        struct page_header *ph;
        struct huge_alloc *ha;
        unsigned long pages[MAX_SMALL_PAGES / BITS_PER_LONG] = { 0 };

        if (poolsize < MIN_USEFUL_SIZE)
                return tiny_alloc_check(pool, poolsize);

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);

        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        /* First, set all bits taken by header. */
        for (i = 0; i < header_size; i += (1UL << sp_bits))
                set_bit(pages, i >> sp_bits);

        /* Check small page free list. */
        prev = 0;
        for (i = head->small_free_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, 1UL << sp_bits, poolsize))
                        return check_fail();
                /* Large page? */
                if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                prev = i;
        }

        /* Check large page free list. */
        prev = 0;
        for (i = head->large_free_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, 1UL << lp_bits, poolsize))
                        return check_fail();
                /* Not large page? */
                if (!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                        return check_fail();
                /* Not page boundary? */
                if ((i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                prev = i;
        }

        /* Check the buckets. */
        for (i = 0; i < max_bucket(lp_bits); i++) {
                struct bucket_state *bs = &head->bs[i];

                if (!check_bucket(head, poolsize, pages, bs, i))
                        return false;
        }

        /* Check the huge alloc list. */
        prev = 0;
        for (i = head->huge; i; i = ha->next) {
                unsigned long pgbits, j;

                /* Bad pointer? */
                if (i >= poolsize || i + sizeof(*ha) > poolsize)
                        return check_fail();
                ha = (void *)((char *)head + i);

                /* Check contents of ha. */
                if (ha->off > poolsize || ha->off + ha->len > poolsize)
                        return check_fail();

                /* Large or small page? */
                pgbits = test_bit(head->pagesize, ha->off >> lp_bits)
                        ? lp_bits : sp_bits;

                /* Not page boundary? */
                if ((ha->off % (1UL << pgbits)) != 0)
                        return check_fail();

                /* Not page length? */
                if ((ha->len % (1UL << pgbits)) != 0)
                        return check_fail();

                /* Linked list corrupt? */
                if (ha->prev != prev)
                        return check_fail();

                for (j = ha->off; j < ha->off + ha->len; j += (1UL<<sp_bits)) {
                        /* Already seen this page? */
                        if (test_bit(pages, j >> sp_bits))
                                return check_fail();
                        set_bit(pages, j >> sp_bits);
                }

                prev = i;
        }

        /* Make sure every page is accounted for. */
        for (i = 0; i < poolsize >> sp_bits; i++) {
                if (!test_bit(pages, i))
                        return check_fail();
                if (test_bit(head->pagesize,
                             i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                        /* Large page, skip rest. */
                        i += SMALL_PAGES_PER_LARGE_PAGE - 1;
                }
        }

        return true;
}

static unsigned long print_overhead(FILE *out, const char *desc,
                                    unsigned long bytes,
                                    unsigned long poolsize)
{
        fprintf(out, "Overhead (%s): %lu bytes (%.3g%%)\n",
                desc, bytes, 100.0 * bytes / poolsize);
        return bytes;
}

static unsigned long count_list(struct header *head,
                                u16 pgnum,
                                unsigned int sp_bits,
                                unsigned long *total_elems)
{
        struct page_header *p;
        unsigned long ret = 0;

        while (pgnum) {
                p = from_pgnum(head, pgnum, sp_bits);
                if (total_elems)
                        (*total_elems) += p->elements_used;
                ret++;
                pgnum = p->next;
        }
        return ret;
}

static unsigned long visualize_bucket(FILE *out, struct header *head,
                                      unsigned int bucket,
                                      unsigned long poolsize,
                                      unsigned int sp_bits)
{
        unsigned long num_full, num_partial, num_pages, page_size,
                elems, hdr_min, hdr_size, elems_per_page, overhead = 0;

        elems_per_page = head->bs[bucket].elements_per_page;

        /* If we used byte-based bitmaps, we could get pg hdr to: */
        hdr_min = sizeof(struct page_header)
                - sizeof(((struct page_header *)0)->used)
                + align_up(elems_per_page, CHAR_BIT) / CHAR_BIT;
        hdr_size = page_header_size(bucket / INTER_BUCKET_SPACE,
                                    elems_per_page);

        elems = 0;
        num_full = count_list(head, head->bs[bucket].full_list, sp_bits,
                              &elems);
        num_partial = count_list(head, head->bs[bucket].page_list, sp_bits,
                                 &elems);
        num_pages = num_full + num_partial;
        if (!num_pages)
                return 0;

        fprintf(out, "Bucket %u (%lu bytes):"
                " %lu full, %lu partial = %lu elements\n",
                bucket, bucket_to_size(bucket), num_full, num_partial, elems);
        /* Strict requirement of page header size. */
        overhead += print_overhead(out, "page headers",
                                   hdr_min * num_pages, poolsize);
        /* Gap between minimal page header and actual start. */
        overhead += print_overhead(out, "page post-header alignments",
                                   (hdr_size - hdr_min) * num_pages, poolsize);
        /* Between last element and end of page. */
        page_size = (1UL << sp_bits);
        if (large_page_bucket(bucket, sp_bits))
                page_size <<= BITS_FROM_SMALL_TO_LARGE_PAGE;

        overhead += print_overhead(out, "page tails",
                                   (page_size - (hdr_size
                                                 + (elems_per_page
                                                    * bucket_to_size(bucket))))
                                   * num_pages, poolsize);
        return overhead;
}

void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        unsigned long i, lp_bits, sp_bits, header_size, num_buckets, count,
                overhead = 0;

        fprintf(out, "Pool %p size %lu: (%s allocator)\n", pool, poolsize,
                poolsize < MIN_USEFUL_SIZE ? "tiny" : "standard");

        if (poolsize < MIN_USEFUL_SIZE) {
                tiny_alloc_visualize(out, pool, poolsize);
                return;
        }

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);
        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        fprintf(out, "Large page size %lu, small page size %lu.\n",
                1UL << lp_bits, 1UL << sp_bits);
        overhead += print_overhead(out, "unused pool tail",
                                   poolsize % (1UL << lp_bits), poolsize);
        fprintf(out, "Main header %lu bytes (%lu small pages).\n",
                header_size, align_up(header_size, 1UL << sp_bits) >> sp_bits);
        overhead += print_overhead(out, "partial header page",
                                   align_up(header_size, 1UL << sp_bits)
                                   - header_size, poolsize);
        /* Total large pages. */
        i = count_bits(head->pagesize, poolsize >> lp_bits);
        /* Used pages. */
        count = i - count_list(head, head->large_free_list, sp_bits, NULL);
        fprintf(out, "%lu/%lu large pages used (%.3g%%)\n",
                count, i, count ? 100.0 * count / i : 0.0);

        /* Total small pages. */
        i = ((poolsize >> lp_bits) - i) << BITS_FROM_SMALL_TO_LARGE_PAGE;
        /* Used pages */
        count = i - count_list(head, head->small_free_list, sp_bits, NULL);
        fprintf(out, "%lu/%lu small pages used (%.3g%%)\n",
                count, i, count ? 100.0 * count / i : 0.0);

        /* Summary of each bucket. */
        fprintf(out, "%lu buckets:\n", num_buckets);
        for (i = 0; i < num_buckets; i++)
                overhead += visualize_bucket(out, head, i, poolsize, sp_bits);

        print_overhead(out, "total", overhead, poolsize);
}