#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include <stdlib.h>
#include "alloc.h"
#include "bitops.h"
#include "tiny.h"
#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>
#include <ccan/alignof/alignof.h>
#include <ccan/short_types/short_types.h>
#include "config.h"

/*
   Inspired by (and parts taken from) Andrew Tridgell's alloc_mmap:
   http://samba.org/~tridge/junkcode/alloc_mmap/

   Copyright (C) Andrew Tridgell 2007

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/* We divide the pool into this many large pages (nearest power of 2) */
#define MAX_LARGE_PAGES (256UL)

/* 32 small pages == 1 large page. */
#define BITS_FROM_SMALL_TO_LARGE_PAGE 5

#define MAX_SMALL_PAGES (MAX_LARGE_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)

/* Smallest pool size for this scheme: 8192 128-byte small pages == 1MB.
 * That's 9%/13% overhead on 32/64 bit. */
#define MIN_USEFUL_SIZE (MAX_SMALL_PAGES * 128)

/* Every 4 buckets, we jump up a power of 2. ...8 10 12 14 16 20 24 28 32... */
#define INTER_BUCKET_SPACE 4

#define SMALL_PAGES_PER_LARGE_PAGE (1 << BITS_FROM_SMALL_TO_LARGE_PAGE)

/* FIXME: Figure this out properly. */
#define MAX_SIZE (1 << 30)

/* How few objects can fit in a page before we use a larger one? (8) */
#define MAX_PAGE_OBJECT_ORDER   3

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

struct bucket_state {
        u32 elements_per_page;
        u16 page_list;
        u16 full_list;
};

struct header {
        /* Bitmap of which pages are large. */
        unsigned long pagesize[MAX_LARGE_PAGES / BITS_PER_LONG];

        /* List of unused small/large pages. */
        u16 small_free_list;
        u16 large_free_list;

        /* List of huge allocs. */
        unsigned long huge;

        /* Dynamically sized: one bucket_state per bucket,
         * INTER_BUCKET_SPACE of them for each power of 2. */
        struct bucket_state bs[1];
};

struct huge_alloc {
        unsigned long next, prev;
        unsigned long off, len;
};

struct page_header {
        u16 next, prev;
        /* FIXME: We can just count all-0 and all-1 used[] elements. */
        unsigned elements_used : 25;
        unsigned bucket : 7;
        unsigned long used[1]; /* One bit per element. */
};

/*
 * Every 4 buckets, the size doubles.
 * Between buckets, sizes increase linearly.
 *
 * eg. bucket 40 = 2^10                 = 1024
 *     bucket 41 = 2^10 + 1 * 2^10/4    = 1024 + 256
 *     bucket 42 = 2^10 + 2 * 2^10/4    = 1024 + 512
 *     bucket 43 = 2^10 + 3 * 2^10/4    = 1024 + 768
 *     bucket 44 = 2^11                 = 2048
 *
 * Care is taken to handle low numbered buckets, at cost of overflow.
 */
static unsigned long bucket_to_size(unsigned int bucket)
{
        unsigned long base = 1 << (bucket / INTER_BUCKET_SPACE);
        return base + ((bucket % INTER_BUCKET_SPACE)
                       << (bucket / INTER_BUCKET_SPACE))
                / INTER_BUCKET_SPACE;
}

/*
 * Say size is 10.
 *   fls(size/2) == 3.  1 << 3 == 8, so we're 2 too large, out of a possible
 * 8 too large.  That's 1/4 of the way to the next power of 2 == 1 bucket.
 *
 * We make sure we round up.  Note that this fails on 32 bit at size
 * 1879048193 (around bucket 120).
 */
static unsigned int size_to_bucket(unsigned long size)
{
        unsigned int base = fls(size/2);
        unsigned long overshoot;

        overshoot = size - (1 << base);
        return base * INTER_BUCKET_SPACE
                + ((overshoot * INTER_BUCKET_SPACE + (1 << base)-1) >> base);
}

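/*
 * A minimal worked example of the two mappings above (the numbers follow
 * directly from the code):
 *
 *   size 10:   fls(5) == 3, overshoot == 2, so bucket == 3*4 + 1 == 13,
 *              and bucket_to_size(13) == 8 + 8/4 == 10 (an exact fit).
 *   size 1025: fls(512) == 10, overshoot == 1, which rounds up to
 *              bucket 41, and bucket_to_size(41) == 1024 + 256 == 1280.
 *
 * ie. each size maps to the smallest bucket whose size can hold it.
 */
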
static unsigned int small_page_bits(unsigned long poolsize)
{
        return fls(poolsize / MAX_SMALL_PAGES / 2);
}

static struct page_header *from_pgnum(struct header *head,
                                      unsigned long pgnum,
                                      unsigned sp_bits)
{
        return (struct page_header *)((char *)head + (pgnum << sp_bits));
}

static u16 to_pgnum(struct header *head, void *p, unsigned sp_bits)
{
        return ((char *)p - (char *)head) >> sp_bits;
}

static size_t used_size(unsigned int num_elements)
{
        return align_up(num_elements, BITS_PER_LONG) / CHAR_BIT;
}

/*
 * We always align the first entry to the lower power of 2.
 * eg. the 12-byte bucket gets 8-byte aligned.  The 4096-byte bucket
 * gets 4096-byte aligned.
 */
static unsigned long page_header_size(unsigned int align_bits,
                                      unsigned long num_elements)
{
        unsigned long size;

        size = sizeof(struct page_header)
                - sizeof(((struct page_header *)0)->used)
                + used_size(num_elements);
        return align_up(size, 1 << align_bits);
}

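/*
 * Worked example, assuming 64-bit longs (so the fixed part of struct
 * page_header is 8 bytes and used[] rounds up to 8-byte words): the
 * 10-byte bucket (align_bits == 3) with 11 elements gives
 *
 *   used_size(11)           == align_up(11, 64) / 8 == 8
 *   page_header_size(3, 11) == align_up(8 + 8, 8)   == 16
 *
 * so a 128-byte small page has 112 bytes left: room for the 11 * 10
 * == 110 bytes of elements.
 */
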
static void add_to_list(struct header *head,
                        u16 *list, struct page_header *ph, unsigned sp_bits)
{
        unsigned long h = *list, offset = to_pgnum(head, ph, sp_bits);

        ph->next = h;
        if (h) {
                struct page_header *prev = from_pgnum(head, h, sp_bits);
                assert(prev->prev == 0);
                prev->prev = offset;
        }
        *list = offset;
        ph->prev = 0;
}

static void del_from_list(struct header *head,
                          u16 *list, struct page_header *ph, unsigned sp_bits)
{
        /* Front of list? */
        if (ph->prev == 0) {
                *list = ph->next;
        } else {
                struct page_header *prev = from_pgnum(head, ph->prev, sp_bits);
                prev->next = ph->next;
        }
        if (ph->next != 0) {
                struct page_header *next = from_pgnum(head, ph->next, sp_bits);
                next->prev = ph->prev;
        }
}

static u16 pop_from_list(struct header *head,
                         u16 *list,
                         unsigned int sp_bits)
{
        u16 h = *list;
        struct page_header *ph = from_pgnum(head, h, sp_bits);

        if (likely(h)) {
                *list = ph->next;
                if (*list)
                        from_pgnum(head, *list, sp_bits)->prev = 0;
        }
        return h;
}

static void add_to_huge_list(struct header *head, struct huge_alloc *ha)
{
        unsigned long h = head->huge;
        unsigned long offset = (char *)ha - (char *)head;

        ha->next = h;
        if (h) {
                struct huge_alloc *prev = (void *)((char *)head + h);
                assert(prev->prev == 0);
                prev->prev = offset;
        }
        head->huge = offset;
        ha->prev = 0;
}

static void del_from_huge(struct header *head, struct huge_alloc *ha)
{
        /* Front of list? */
        if (ha->prev == 0) {
                head->huge = ha->next;
        } else {
                struct huge_alloc *prev = (void *)((char *)head + ha->prev);
                prev->next = ha->next;
        }
        if (ha->next != 0) {
                struct huge_alloc *next = (void *)((char *)head + ha->next);
                next->prev = ha->prev;
        }
}

static void add_small_page_to_freelist(struct header *head,
                                       struct page_header *ph,
                                       unsigned int sp_bits)
{
        add_to_list(head, &head->small_free_list, ph, sp_bits);
}

static void add_large_page_to_freelist(struct header *head,
                                       struct page_header *ph,
                                       unsigned int sp_bits)
{
        add_to_list(head, &head->large_free_list, ph, sp_bits);
}

static void add_to_bucket_list(struct header *head,
                               struct bucket_state *bs,
                               struct page_header *ph,
                               unsigned int sp_bits)
{
        add_to_list(head, &bs->page_list, ph, sp_bits);
}

static void del_from_bucket_list(struct header *head,
                                 struct bucket_state *bs,
                                 struct page_header *ph,
                                 unsigned int sp_bits)
{
        del_from_list(head, &bs->page_list, ph, sp_bits);
}

static void del_from_bucket_full_list(struct header *head,
                                      struct bucket_state *bs,
                                      struct page_header *ph,
                                      unsigned int sp_bits)
{
        del_from_list(head, &bs->full_list, ph, sp_bits);
}

static void add_to_bucket_full_list(struct header *head,
                                    struct bucket_state *bs,
                                    struct page_header *ph,
                                    unsigned int sp_bits)
{
        add_to_list(head, &bs->full_list, ph, sp_bits);
}

/* Note the 1UL: a plain int 1 shifted by 32 or more is undefined when
 * longs are 64 bit. */
static void clear_bit(unsigned long bitmap[], unsigned int off)
{
        bitmap[off / BITS_PER_LONG] &= ~(1UL << (off % BITS_PER_LONG));
}

static bool test_bit(const unsigned long bitmap[], unsigned int off)
{
        return bitmap[off / BITS_PER_LONG] & (1UL << (off % BITS_PER_LONG));
}

static void set_bit(unsigned long bitmap[], unsigned int off)
{
        bitmap[off / BITS_PER_LONG] |= (1UL << (off % BITS_PER_LONG));
}

/* There must be a bit to be found. */
static unsigned int find_free_bit(const unsigned long bitmap[])
{
        unsigned int i;

        for (i = 0; bitmap[i] == -1UL; i++);
        return (i*BITS_PER_LONG) + ffsl(~bitmap[i]) - 1;
}

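/*
 * eg. with bitmap[0] == -1UL (all bits used) and bitmap[1] == 0x5UL,
 * find_free_bit skips the first word and returns BITS_PER_LONG + 1,
 * the lowest clear bit of the second word.
 */
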
/* How many elements can we fit in a page? */
static unsigned long elements_per_page(unsigned long align_bits,
                                       unsigned long esize,
                                       unsigned long psize)
{
        unsigned long num, overhead;

        /* First approximation: no extra room for bitmap. */
        overhead = align_up(sizeof(struct page_header), 1 << align_bits);
        num = (psize - overhead) / esize;

        while (page_header_size(align_bits, num) + esize * num > psize)
                num--;
        return num;
}

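/*
 * Worked example, again assuming 64-bit longs: for the 8-byte bucket
 * (align_bits == 3) in a 128-byte small page, the first approximation is
 * (128 - 16) / 8 == 14 elements, and the loop confirms the fit since
 * page_header_size(3, 14) + 14 * 8 == 16 + 112 == 128.
 */
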
static bool large_page_bucket(unsigned int bucket, unsigned int sp_bits)
{
        unsigned long max_smallsize;

        /* Note: this doesn't take into account page header. */
        max_smallsize = (1UL << sp_bits) >> MAX_PAGE_OBJECT_ORDER;

        return bucket_to_size(bucket) > max_smallsize;
}

static unsigned int max_bucket(unsigned int lp_bits)
{
        return (lp_bits - MAX_PAGE_OBJECT_ORDER) * INTER_BUCKET_SPACE;
}

void alloc_init(void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        struct page_header *ph;
        unsigned int lp_bits, sp_bits, num_buckets;
        unsigned long header_size, i;

        if (poolsize < MIN_USEFUL_SIZE) {
                tiny_alloc_init(pool, poolsize);
                return;
        }

        /* We rely on page numbers fitting in 16 bit. */
        BUILD_ASSERT(MAX_SMALL_PAGES < 65536);

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);

        head = pool;
        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        memset(head, 0, header_size);
        for (i = 0; i < num_buckets; i++) {
                unsigned long pagesize;

                if (large_page_bucket(i, sp_bits))
                        pagesize = 1UL << lp_bits;
                else
                        pagesize = 1UL << sp_bits;

                head->bs[i].elements_per_page
                        = elements_per_page(i / INTER_BUCKET_SPACE,
                                            bucket_to_size(i),
                                            pagesize);
        }

        /* They start as all large pages. */
        memset(head->pagesize, 0xFF, sizeof(head->pagesize));
        /* FIXME: small pages for last bit? */

        /* Split first page into small pages. */
        assert(header_size < (1UL << lp_bits));
        clear_bit(head->pagesize, 0);

        /* Skip over page(s) used by header, add rest to free list */
        for (i = align_up(header_size, (1 << sp_bits)) >> sp_bits;
             i < SMALL_PAGES_PER_LARGE_PAGE;
             i++) {
                ph = from_pgnum(head, i, sp_bits);
                ph->elements_used = 0;
                add_small_page_to_freelist(head, ph, sp_bits);
        }

        /* Add the rest of the pages as large pages. */
        i = SMALL_PAGES_PER_LARGE_PAGE;
        while ((i << sp_bits) + (1 << lp_bits) <= poolsize) {
                ph = from_pgnum(head, i, sp_bits);
                ph->elements_used = 0;
                add_large_page_to_freelist(head, ph, sp_bits);
                i += SMALL_PAGES_PER_LARGE_PAGE;
        }
}

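/*
 * Typical use of the pool API (a minimal sketch; real callers should
 * check every return value):
 *
 *      char pool[2 * 1024 * 1024];     (>= MIN_USEFUL_SIZE, suitably aligned)
 *
 *      alloc_init(pool, sizeof(pool));
 *      void *p = alloc_get(pool, sizeof(pool), 1000, 16);
 *      if (p) {
 *              assert(alloc_size(pool, sizeof(pool), p) >= 1000);
 *              alloc_free(pool, sizeof(pool), p);
 *      }
 *      assert(alloc_check(pool, sizeof(pool)));
 */
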
/* A large page's worth of small pages is free: delete them all from the
 * free list. */
static void del_large_from_small_free_list(struct header *head,
                                           struct page_header *ph,
                                           unsigned int sp_bits)
{
        unsigned long i;

        for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                del_from_list(head, &head->small_free_list,
                              (void *)((char *)ph + (i << sp_bits)),
                              sp_bits);
        }
}

static bool all_empty(struct header *head,
                      unsigned long pgnum,
                      unsigned sp_bits)
{
        unsigned long i;

        for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                struct page_header *ph = from_pgnum(head, pgnum + i, sp_bits);
                if (ph->elements_used)
                        return false;
        }
        return true;
}

static void recombine_small_pages(struct header *head, unsigned long poolsize,
                                  unsigned int sp_bits)
{
        unsigned long i;
        unsigned int lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        /* Look for small pages to coalesce, after first large page. */
        for (i = SMALL_PAGES_PER_LARGE_PAGE;
             i < (poolsize >> lp_bits) << BITS_FROM_SMALL_TO_LARGE_PAGE;
             i += SMALL_PAGES_PER_LARGE_PAGE) {
                /* Already a large page? */
                if (test_bit(head->pagesize, i / SMALL_PAGES_PER_LARGE_PAGE))
                        continue;
                if (all_empty(head, i, sp_bits)) {
                        struct page_header *ph = from_pgnum(head, i, sp_bits);
                        set_bit(head->pagesize,
                                i / SMALL_PAGES_PER_LARGE_PAGE);
                        del_large_from_small_free_list(head, ph, sp_bits);
                        add_large_page_to_freelist(head, ph, sp_bits);
                }
        }
}

static u16 get_large_page(struct header *head, unsigned long poolsize,
                          unsigned int sp_bits)
{
        unsigned int page;

        page = pop_from_list(head, &head->large_free_list, sp_bits);
        if (likely(page))
                return page;

        recombine_small_pages(head, poolsize, sp_bits);

        return pop_from_list(head, &head->large_free_list, sp_bits);
}

/* Returns small page. */
static unsigned long break_up_large_page(struct header *head,
                                         unsigned int sp_bits,
                                         u16 lpage)
{
        unsigned int i;

        clear_bit(head->pagesize, lpage >> BITS_FROM_SMALL_TO_LARGE_PAGE);

        for (i = 1; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                struct page_header *ph = from_pgnum(head, lpage + i, sp_bits);
                add_small_page_to_freelist(head, ph, sp_bits);
        }

        return lpage;
}

static u16 get_small_page(struct header *head, unsigned long poolsize,
                          unsigned int sp_bits)
{
        u16 ret;

        ret = pop_from_list(head, &head->small_free_list, sp_bits);
        if (likely(ret))
                return ret;
        ret = get_large_page(head, poolsize, sp_bits);
        if (likely(ret))
                ret = break_up_large_page(head, sp_bits, ret);
        return ret;
}

static bool huge_allocated(struct header *head, unsigned long offset)
{
        unsigned long i;
        struct huge_alloc *ha;

        for (i = head->huge; i; i = ha->next) {
                ha = (void *)((char *)head + i);
                if (ha->off <= offset && ha->off + ha->len > offset)
                        return true;
        }
        return false;
}

/* They want something really big.  Aim for contiguous pages (slow). */
static void *unlikely_func huge_alloc(void *pool, unsigned long poolsize,
                                      unsigned long size, unsigned long align)
{
        struct header *head = pool;
        struct huge_alloc *ha;
        unsigned long i, sp_bits, lp_bits, num, header_size;

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        /* Allocate tracking structure optimistically. */
        ha = alloc_get(pool, poolsize, sizeof(*ha), ALIGNOF(*ha));
        if (!ha)
                return NULL;

        /* First search for contiguous small pages... */
        header_size = sizeof(*head) + sizeof(head->bs) * (max_bucket(lp_bits)-1);

        num = 0;
        for (i = (header_size + (1 << sp_bits) - 1) >> sp_bits;
             i << sp_bits < poolsize;
             i++) {
                struct page_header *pg;
                unsigned long off = (i << sp_bits);

                /* Skip over large pages. */
                if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                        i += (1 << BITS_FROM_SMALL_TO_LARGE_PAGE)-1;
                        continue;
                }

                /* Does this page meet alignment requirements? */
                if (!num && off % align != 0)
                        continue;

                /* FIXME: This makes us O(n^2). */
                if (huge_allocated(head, off)) {
                        num = 0;
                        continue;
                }

                pg = (struct page_header *)((char *)head + off);
                if (pg->elements_used) {
                        num = 0;
                        continue;
                }

                num++;
                if (num << sp_bits >= size) {
                        unsigned long pgnum;

                        /* Remove from free list. */
                        for (pgnum = i; pgnum > i - num; pgnum--) {
                                pg = from_pgnum(head, pgnum, sp_bits);
                                del_from_list(head,
                                              &head->small_free_list,
                                              pg, sp_bits);
                        }
                        ha->off = (i - num + 1) << sp_bits;
                        ha->len = num << sp_bits;
                        goto done;
                }
        }

        /* Now search for large pages... */
        recombine_small_pages(head, poolsize, sp_bits);

        num = 0;
        for (i = (header_size + (1 << lp_bits) - 1) >> lp_bits;
             (i << lp_bits) < poolsize; i++) {
                struct page_header *pg;
                unsigned long off = (i << lp_bits);

                /* Ignore small pages. */
                if (!test_bit(head->pagesize, i))
                        continue;

                /* Does this page meet alignment requirements? */
                if (!num && off % align != 0)
                        continue;

                /* FIXME: This makes us O(n^2). */
                if (huge_allocated(head, off)) {
                        num = 0;
                        continue;
                }

                pg = (struct page_header *)((char *)head + off);
                if (pg->elements_used) {
                        num = 0;
                        continue;
                }

                num++;
                if (num << lp_bits >= size) {
                        unsigned long pgnum;

                        /* Remove from free list. */
                        for (pgnum = i; pgnum > i - num; pgnum--) {
                                pg = from_pgnum(head, pgnum, lp_bits);
                                del_from_list(head,
                                              &head->large_free_list,
                                              pg, sp_bits);
                        }
                        ha->off = (i - num + 1) << lp_bits;
                        ha->len = num << lp_bits;
                        goto done;
                }
        }

        /* Unable to satisfy: free huge alloc structure. */
        alloc_free(pool, poolsize, ha);
        return NULL;

done:
        add_to_huge_list(pool, ha);
        return (char *)pool + ha->off;
}

static void unlikely_func huge_free(struct header *head,
                                    unsigned long poolsize, void *free)
{
        unsigned long i, off, pgnum, free_off = (char *)free - (char *)head;
        unsigned int sp_bits, lp_bits;
        struct huge_alloc *ha;

        for (i = head->huge; i; i = ha->next) {
                ha = (void *)((char *)head + i);
                if (free_off == ha->off)
                        break;
        }
        assert(i);

        /* Free up all the pages, delete and free ha */
        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
        pgnum = free_off >> sp_bits;

        if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                for (off = ha->off; off < ha->off + ha->len; off += 1 << lp_bits) {
                        add_large_page_to_freelist(head,
                                                   (void *)((char *)head + off),
                                                   sp_bits);
                }
        } else {
                for (off = ha->off; off < ha->off + ha->len; off += 1 << sp_bits) {
                        add_small_page_to_freelist(head,
                                                   (void *)((char *)head + off),
                                                   sp_bits);
                }
        }
        del_from_huge(head, ha);
        alloc_free(head, poolsize, ha);
}

static unsigned long unlikely_func huge_size(struct header *head, void *p)
{
        unsigned long i, off = (char *)p - (char *)head;
        struct huge_alloc *ha;

        for (i = head->huge; i; i = ha->next) {
                ha = (void *)((char *)head + i);
                if (off == ha->off) {
                        return ha->len;
                }
        }
        abort();
}

void *alloc_get(void *pool, unsigned long poolsize,
                unsigned long size, unsigned long align)
{
        struct header *head = pool;
        unsigned int bucket;
        unsigned long i;
        struct bucket_state *bs;
        struct page_header *ph;
        unsigned int sp_bits;

        if (poolsize < MIN_USEFUL_SIZE) {
                return tiny_alloc_get(pool, poolsize, size, align);
        }

        size = align_up(size, align);
        if (unlikely(!size))
                size = 1;
        bucket = size_to_bucket(size);

        sp_bits = small_page_bits(poolsize);

        if (bucket >= max_bucket(sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                return huge_alloc(pool, poolsize, size, align);
        }

        bs = &head->bs[bucket];

        if (!bs->page_list) {
                struct page_header *ph;

                if (large_page_bucket(bucket, sp_bits))
                        bs->page_list = get_large_page(head, poolsize,
                                                       sp_bits);
                else
                        bs->page_list = get_small_page(head, poolsize,
                                                       sp_bits);
                /* FIXME: Try large-aligned alloc?  Header stuffing? */
                if (unlikely(!bs->page_list))
                        return NULL;
                ph = from_pgnum(head, bs->page_list, sp_bits);
                ph->bucket = bucket;
                ph->elements_used = 0;
                ph->next = 0;
                memset(ph->used, 0, used_size(bs->elements_per_page));
        }

        ph = from_pgnum(head, bs->page_list, sp_bits);

        i = find_free_bit(ph->used);
        set_bit(ph->used, i);
        ph->elements_used++;

        /* check if this page is now full */
        if (unlikely(ph->elements_used == bs->elements_per_page)) {
                del_from_bucket_list(head, bs, ph, sp_bits);
                add_to_bucket_full_list(head, bs, ph, sp_bits);
        }

        return (char *)ph + page_header_size(ph->bucket / INTER_BUCKET_SPACE,
                                             bs->elements_per_page)
               + i * bucket_to_size(bucket);
}

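/*
 * Address arithmetic example, assuming 64-bit longs and 128-byte small
 * pages: for the 8-byte bucket, page_header_size(3, 14) == 16, so element
 * i of small page 5 lives at pool offset 5*128 + 16 + i*8; element 3 is
 * at offset 680.
 */
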
void alloc_free(void *pool, unsigned long poolsize, void *free)
{
        struct header *head = pool;
        struct bucket_state *bs;
        unsigned int sp_bits;
        unsigned long i, pgnum, pgoffset, offset = (char *)free - (char *)pool;
        bool smallpage;
        struct page_header *ph;

        if (poolsize < MIN_USEFUL_SIZE) {
                return tiny_alloc_free(pool, poolsize, free);
        }

        /* Get page header. */
        sp_bits = small_page_bits(poolsize);
        pgnum = offset >> sp_bits;

        /* Big page? Round down further. */
        if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                smallpage = false;
                pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);
        } else
                smallpage = true;

        /* Step back to page header. */
        ph = from_pgnum(head, pgnum, sp_bits);
        if ((void *)ph == free) {
                huge_free(head, poolsize, free);
                return;
        }

        bs = &head->bs[ph->bucket];
        pgoffset = offset - (pgnum << sp_bits)
                - page_header_size(ph->bucket / INTER_BUCKET_SPACE,
                                   bs->elements_per_page);

        if (unlikely(ph->elements_used == bs->elements_per_page)) {
                del_from_bucket_full_list(head, bs, ph, sp_bits);
                add_to_bucket_list(head, bs, ph, sp_bits);
        }

        /* Which element are we? */
        i = pgoffset / bucket_to_size(ph->bucket);
        clear_bit(ph->used, i);
        ph->elements_used--;

        if (unlikely(ph->elements_used == 0)) {
                bs = &head->bs[ph->bucket];
                del_from_bucket_list(head, bs, ph, sp_bits);
                if (smallpage)
                        add_small_page_to_freelist(head, ph, sp_bits);
                else
                        add_large_page_to_freelist(head, ph, sp_bits);
        }
}

unsigned long alloc_size(void *pool, unsigned long poolsize, void *p)
{
        struct header *head = pool;
        unsigned int pgnum, sp_bits;
        unsigned long offset = (char *)p - (char *)pool;
        struct page_header *ph;

        if (poolsize < MIN_USEFUL_SIZE)
                return tiny_alloc_size(pool, poolsize, p);

        /* Get page header. */
        sp_bits = small_page_bits(poolsize);
        pgnum = offset >> sp_bits;

        /* Big page? Round down further. */
        if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);

        /* Step back to page header. */
        ph = from_pgnum(head, pgnum, sp_bits);
        if ((void *)ph == p)
                return huge_size(head, p);

        return bucket_to_size(ph->bucket);
}

/* Useful for gdb breakpoints. */
static bool check_fail(void)
{
        return false;
}

static unsigned long count_bits(const unsigned long bitmap[],
                                unsigned long limit)
{
        unsigned long i, count = 0;

        while (limit >= BITS_PER_LONG) {
                count += popcount(bitmap[0]);
                bitmap++;
                limit -= BITS_PER_LONG;
        }

        for (i = 0; i < limit; i++)
                if (test_bit(bitmap, i))
                        count++;
        return count;
}

static bool out_of_bounds(unsigned long pgnum,
                          unsigned int sp_bits,
                          unsigned long pagesize,
                          unsigned long poolsize)
{
        if (((pgnum << sp_bits) >> sp_bits) != pgnum)
                return true;

        if ((pgnum << sp_bits) > poolsize)
                return true;

        return ((pgnum << sp_bits) + pagesize > poolsize);
}

static bool check_bucket(struct header *head,
                         unsigned long poolsize,
                         unsigned long pages[],
                         struct bucket_state *bs,
                         unsigned int bindex)
{
        bool lp_bucket;
        struct page_header *ph;
        unsigned long taken, i, prev, pagesize, sp_bits, lp_bits;

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        lp_bucket = large_page_bucket(bindex, sp_bits);

        pagesize = 1UL << (lp_bucket ? lp_bits : sp_bits);

        /* This many elements fit? */
        taken = page_header_size(bindex / INTER_BUCKET_SPACE,
                                 bs->elements_per_page);
        taken += bucket_to_size(bindex) * bs->elements_per_page;
        if (taken > pagesize)
                return check_fail();

        /* One more wouldn't fit? */
        taken = page_header_size(bindex / INTER_BUCKET_SPACE,
                                 bs->elements_per_page + 1);
        taken += bucket_to_size(bindex) * (bs->elements_per_page + 1);
        if (taken <= pagesize)
                return check_fail();

        /* Walk used list. */
        prev = 0;
        for (i = bs->page_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, pagesize, poolsize))
                        return check_fail();
                /* Wrong size page? */
                if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
                    != lp_bucket)
                        return check_fail();
                /* Large page not on boundary? */
                if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                /* Empty or full? */
                if (ph->elements_used == 0)
                        return check_fail();
                if (ph->elements_used >= bs->elements_per_page)
                        return check_fail();
                /* Used bits don't agree? */
                if (ph->elements_used != count_bits(ph->used,
                                                    bs->elements_per_page))
                        return check_fail();
                /* Wrong bucket? */
                if (ph->bucket != bindex)
                        return check_fail();
                prev = i;
        }

        /* Walk full list. */
        prev = 0;
        for (i = bs->full_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, pagesize, poolsize))
                        return check_fail();
                /* Wrong size page? */
                if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
                    != lp_bucket)
                        return check_fail();
                /* Large page not on boundary? */
                if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                /* Not full? */
                if (ph->elements_used != bs->elements_per_page)
                        return check_fail();
                /* Used bits don't agree? */
                if (ph->elements_used != count_bits(ph->used,
                                                    bs->elements_per_page))
                        return check_fail();
                /* Wrong bucket? */
                if (ph->bucket != bindex)
                        return check_fail();
                prev = i;
        }
        return true;
}

bool alloc_check(void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        unsigned long prev, i, lp_bits, sp_bits, header_size, num_buckets;
        struct page_header *ph;
        struct huge_alloc *ha;
        unsigned long pages[MAX_SMALL_PAGES / BITS_PER_LONG] = { 0 };

        if (poolsize < MIN_USEFUL_SIZE)
                return tiny_alloc_check(pool, poolsize);

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);

        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        /* First, set all bits taken by header. */
        for (i = 0; i < header_size; i += (1UL << sp_bits))
                set_bit(pages, i >> sp_bits);

        /* Check small page free list. */
        prev = 0;
        for (i = head->small_free_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, 1 << sp_bits, poolsize))
                        return check_fail();
                /* Large page? */
                if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                prev = i;
        }

        /* Check large page free list. */
        prev = 0;
        for (i = head->large_free_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, 1 << lp_bits, poolsize))
                        return check_fail();
                /* Not large page? */
                if (!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                        return check_fail();
                /* Not page boundary? */
                if ((i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                prev = i;
        }

        /* Check the buckets. */
        for (i = 0; i < max_bucket(lp_bits); i++) {
                struct bucket_state *bs = &head->bs[i];

                if (!check_bucket(head, poolsize, pages, bs, i))
                        return false;
        }

        /* Check the huge alloc list. */
        prev = 0;
        for (i = head->huge; i; i = ha->next) {
                unsigned long pgbits, j;

                /* Bad pointer? */
                if (i >= poolsize || i + sizeof(*ha) > poolsize)
                        return check_fail();
                ha = (void *)((char *)head + i);

                /* Check contents of ha. */
                if (ha->off > poolsize || ha->off + ha->len > poolsize)
                        return check_fail();

                /* Large or small page? */
                pgbits = test_bit(head->pagesize, ha->off >> lp_bits)
                        ? lp_bits : sp_bits;

                /* Not page boundary? */
                if ((ha->off % (1UL << pgbits)) != 0)
                        return check_fail();

                /* Not page length? */
                if ((ha->len % (1UL << pgbits)) != 0)
                        return check_fail();

                /* Linked list corrupt? */
                if (ha->prev != prev)
                        return check_fail();

                for (j = ha->off; j < ha->off + ha->len; j += (1 << sp_bits)) {
                        /* Already seen this page? */
                        if (test_bit(pages, j >> sp_bits))
                                return check_fail();
                        set_bit(pages, j >> sp_bits);
                }

                prev = i;
        }

        /* Make sure every page is accounted for. */
        for (i = 0; i < poolsize >> sp_bits; i++) {
                if (!test_bit(pages, i))
                        return check_fail();
                if (test_bit(head->pagesize,
                             i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                        /* Large page, skip rest. */
                        i += SMALL_PAGES_PER_LARGE_PAGE - 1;
                }
        }

        return true;
}

static unsigned long print_overhead(FILE *out, const char *desc,
                                    unsigned long bytes,
                                    unsigned long poolsize)
{
        fprintf(out, "Overhead (%s): %lu bytes (%.3g%%)\n",
                desc, bytes, 100.0 * bytes / poolsize);
        return bytes;
}

static unsigned long count_list(struct header *head,
                                u16 pgnum,
                                unsigned int sp_bits,
                                unsigned long *total_elems)
{
        struct page_header *p;
        unsigned long ret = 0;

        while (pgnum) {
                p = from_pgnum(head, pgnum, sp_bits);
                if (total_elems)
                        (*total_elems) += p->elements_used;
                ret++;
                pgnum = p->next;
        }
        return ret;
}

static unsigned long visualize_bucket(FILE *out, struct header *head,
                                      unsigned int bucket,
                                      unsigned long poolsize,
                                      unsigned int sp_bits)
{
        unsigned long num_full, num_partial, num_pages, page_size,
                elems, hdr_min, hdr_size, elems_per_page, overhead = 0;

        elems_per_page = head->bs[bucket].elements_per_page;

        /* If we used byte-based bitmaps, the page header could shrink to: */
        hdr_min = sizeof(struct page_header)
                - sizeof(((struct page_header *)0)->used)
                + align_up(elems_per_page, CHAR_BIT) / CHAR_BIT;
        hdr_size = page_header_size(bucket / INTER_BUCKET_SPACE,
                                    elems_per_page);

        elems = 0;
        num_full = count_list(head, head->bs[bucket].full_list, sp_bits,
                              &elems);
        num_partial = count_list(head, head->bs[bucket].page_list, sp_bits,
                                 &elems);
        num_pages = num_full + num_partial;
        if (!num_pages)
                return 0;

        fprintf(out, "Bucket %u (%lu bytes):"
                " %lu full, %lu partial = %lu elements\n",
                bucket, bucket_to_size(bucket), num_full, num_partial, elems);
        /* Strict requirement of page header size. */
        overhead += print_overhead(out, "page headers",
                                   hdr_min * num_pages, poolsize);
        /* Gap between minimal page header and actual start. */
        overhead += print_overhead(out, "page post-header alignments",
                                   (hdr_size - hdr_min) * num_pages, poolsize);
        /* Between last element and end of page. */
        page_size = (1 << sp_bits);
        if (large_page_bucket(bucket, sp_bits))
                page_size <<= BITS_FROM_SMALL_TO_LARGE_PAGE;

        overhead += print_overhead(out, "page tails",
                                   (page_size - (hdr_size
                                                 + (elems_per_page
                                                    * bucket_to_size(bucket))))
                                   * num_pages, poolsize);
        return overhead;
}

void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        unsigned long i, lp_bits, sp_bits, header_size, num_buckets, count,
                overhead = 0;

        fprintf(out, "Pool %p size %lu: (%s allocator)\n", pool, poolsize,
                poolsize < MIN_USEFUL_SIZE ? "tiny" : "standard");

        if (poolsize < MIN_USEFUL_SIZE) {
                tiny_alloc_visualize(out, pool, poolsize);
                return;
        }

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);
        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        fprintf(out, "Large page size %lu, small page size %lu.\n",
                1UL << lp_bits, 1UL << sp_bits);
        overhead += print_overhead(out, "unused pool tail",
                                   poolsize % (1 << lp_bits), poolsize);
        fprintf(out, "Main header %lu bytes (%lu small pages).\n",
                header_size, align_up(header_size, 1 << sp_bits) >> sp_bits);
        overhead += print_overhead(out, "partial header page",
                                   align_up(header_size, 1 << sp_bits)
                                   - header_size, poolsize);
        /* Total large pages. */
        i = count_bits(head->pagesize, poolsize >> lp_bits);
        /* Used pages. */
        count = i - count_list(head, head->large_free_list, sp_bits, NULL);
        fprintf(out, "%lu/%lu large pages used (%.3g%%)\n",
                count, i, count ? 100.0 * count / i : 0.0);

        /* Total small pages. */
        i = ((poolsize >> lp_bits) - i) << BITS_FROM_SMALL_TO_LARGE_PAGE;
        /* Used pages */
        count = i - count_list(head, head->small_free_list, sp_bits, NULL);
        fprintf(out, "%lu/%lu small pages used (%.3g%%)\n",
                count, i, count ? 100.0 * count / i : 0.0);

        /* Summary of each bucket. */
        fprintf(out, "%lu buckets:\n", num_buckets);
        for (i = 0; i < num_buckets; i++)
                overhead += visualize_bucket(out, head, i, poolsize, sp_bits);

        print_overhead(out, "total", overhead, poolsize);
}