#include <unistd.h>
#include <stdint.h>
#include <stdio.h>	/* FILE, fprintf: used by the visualization helpers */
#include <string.h>
#include <limits.h>
#include <assert.h>
#include <stdlib.h>
#include "alloc.h"
#include "bitops.h"
#include "tiny.h"
#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>
#include <ccan/alignof/alignof.h>
#include <ccan/short_types/short_types.h>
#include "config.h"

/*
   Inspired by (and parts taken from) Andrew Tridgell's alloc_mmap:
   http://samba.org/~tridge/junkcode/alloc_mmap/

   Copyright (C) Andrew Tridgell 2007

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/* We divide the pool into this many large pages (nearest power of 2) */
#define MAX_LARGE_PAGES (256UL)

/* 32 small pages == 1 large page. */
#define BITS_FROM_SMALL_TO_LARGE_PAGE 5

#define MAX_SMALL_PAGES (MAX_LARGE_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)

/* Smallest pool size for this scheme: 128-byte small pages.  That's
 * about 9% overhead on 32 bit, 13% on 64 bit. */
#define MIN_USEFUL_SIZE (MAX_SMALL_PAGES * 128)
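/* For reference: MAX_SMALL_PAGES is 256 << 5 == 8192 small pages, so the
 * minimum is 8192 * 128 bytes == 1MB.  Anything smaller is handed off to
 * the tiny allocator (see alloc_init() below). */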

/* Every 4 buckets, we jump up a power of 2. ...8 10 12 14 16 20 24 28 32... */
#define INTER_BUCKET_SPACE 4

#define SMALL_PAGES_PER_LARGE_PAGE (1 << BITS_FROM_SMALL_TO_LARGE_PAGE)

/* FIXME: Figure this out properly. */
#define MAX_SIZE (1 << 30)

/* How few objects may fit in a page before we use a larger one? (8) */
#define MAX_PAGE_OBJECT_ORDER	3

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

struct bucket_state {
        u32 elements_per_page;
        u16 page_list;
        u16 full_list;
};

struct header {
        /* Bitmap of which pages are large. */
        unsigned long pagesize[MAX_LARGE_PAGES / BITS_PER_LONG];

        /* List of unused small/large pages. */
        u16 small_free_list;
        u16 large_free_list;

        /* List of huge allocs. */
        unsigned long huge;

        /* Variable-length in practice: INTER_BUCKET_SPACE (4) buckets
         * for each power of 2. */
        struct bucket_state bs[1];
};

struct huge_alloc {
        unsigned long next, prev;
        unsigned long off, len;
};

struct page_header {
        u16 next, prev;
        /* FIXME: We can just count all-0 and all-1 used[] elements. */
        unsigned elements_used : 25;
        unsigned bucket : 7;
        unsigned long used[1]; /* One bit per element. */
};
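
/*
 * Rough memory layout (illustrative sketch, derived from alloc_init() and
 * alloc_get() below; proportions not to scale):
 *
 *   pool:  | header | small pages ... | large page | large page | ...
 *          '--- first large page, pre-split ---'
 *
 *   page:  | page_header (incl. used[] bitmap) | elem | elem | ... | tail |
 */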

/*
 * Every 4 buckets, the size doubles.
 * Between buckets, sizes increase linearly.
 *
 * eg. bucket 40 = 2^10                 = 1024
 *     bucket 41 = 2^10 + 1*2^10/4      = 1024 + 256
 *     bucket 42 = 2^10 + 2*2^10/4      = 1024 + 512
 *     bucket 43 = 2^10 + 3*2^10/4      = 1024 + 768
 *     bucket 44 = 2^11                 = 2048
 *
 * Care is taken to handle low numbered buckets, at cost of overflow.
 */
static unsigned long bucket_to_size(unsigned int bucket)
{
        unsigned long base = 1UL << (bucket / INTER_BUCKET_SPACE);
        return base + ((unsigned long)(bucket % INTER_BUCKET_SPACE)
                       << (bucket / INTER_BUCKET_SPACE))
                / INTER_BUCKET_SPACE;
}
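
/*
 * Worked example: bucket 42 gives base = 1 << (42/4) = 1024, plus
 * ((42 % 4) << 10) / 4 = 2048/4 = 512, i.e. 1536 bytes.
 */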

/*
 * Say size is 10.
 *   fls(size/2) == 3.  1 << 3 == 8, so we're 2 too large, out of a possible
 * 8 too large.  That's 1/4 of the way to the next power of 2 == 1 bucket.
 *
 * We make sure we round up.  Note that this fails on 32 bit at size
 * 1879048193 (around bucket 120).
 */
static unsigned int size_to_bucket(unsigned long size)
{
        unsigned int base = fls(size/2);
        unsigned long overshoot;

        overshoot = size - (1UL << base);
        return base * INTER_BUCKET_SPACE
                + ((overshoot * INTER_BUCKET_SPACE + (1UL << base)-1) >> base);
}
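
/*
 * Worked example, the round trip of bucket 42 above: size 1536 gives
 * base = fls(768) = 10, overshoot = 1536 - 1024 = 512, so the bucket is
 * 10*4 + ((512*4 + 1023) >> 10) = 40 + 2 = 42.
 */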

static unsigned int small_page_bits(unsigned long poolsize)
{
        return fls(poolsize / MAX_SMALL_PAGES - 1);
}

static struct page_header *from_pgnum(struct header *head,
                                      unsigned long pgnum,
                                      unsigned sp_bits)
{
        return (struct page_header *)((char *)head + (pgnum << sp_bits));
}

static u16 to_pgnum(struct header *head, void *p, unsigned sp_bits)
{
        return ((char *)p - (char *)head) >> sp_bits;
}

static size_t used_size(unsigned int num_elements)
{
        return align_up(num_elements, BITS_PER_LONG) / CHAR_BIT;
}

/*
 * We always align the first entry to the lower power of 2.
 * eg. the 12-byte bucket gets 8-byte aligned.  The 4096-byte bucket
 * gets 4096-byte aligned.
 */
static unsigned long page_header_size(unsigned int align_bits,
                                      unsigned long num_elements)
{
        unsigned long size;

        size = sizeof(struct page_header)
                - sizeof(((struct page_header *)0)->used)
                + used_size(num_elements);
        return align_up(size, 1 << align_bits);
}
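
/*
 * Worked example (assumes a typical LP64 layout, where
 * sizeof(struct page_header) == 16 including its one-element used[]):
 * for 100 elements at align_bits == 3, used_size(100) =
 * align_up(100, 64)/8 = 16 bytes of bitmap, so size = 16 - 8 + 16 = 24,
 * which is already 8-byte aligned.
 */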

static void add_to_list(struct header *head,
                        u16 *list, struct page_header *ph, unsigned sp_bits)
{
        unsigned long h = *list, offset = to_pgnum(head, ph, sp_bits);

        ph->next = h;
        if (h) {
                struct page_header *prev = from_pgnum(head, h, sp_bits);
                assert(prev->prev == 0);
                prev->prev = offset;
        }
        *list = offset;
        ph->prev = 0;
}

static void del_from_list(struct header *head,
                          u16 *list, struct page_header *ph, unsigned sp_bits)
{
        /* Front of list? */
        if (ph->prev == 0) {
                *list = ph->next;
        } else {
                struct page_header *prev = from_pgnum(head, ph->prev, sp_bits);
                prev->next = ph->next;
        }
        if (ph->next != 0) {
                struct page_header *next = from_pgnum(head, ph->next, sp_bits);
                next->prev = ph->prev;
        }
}

static u16 pop_from_list(struct header *head,
                         u16 *list,
                         unsigned int sp_bits)
{
        u16 h = *list;
        struct page_header *ph = from_pgnum(head, h, sp_bits);

        if (likely(h)) {
                *list = ph->next;
                if (*list)
                        from_pgnum(head, *list, sp_bits)->prev = 0;
        }
        return h;
}

static void add_to_huge_list(struct header *head, struct huge_alloc *ha)
{
        unsigned long h = head->huge;
        unsigned long offset = (char *)ha - (char *)head;

        ha->next = h;
        if (h) {
                struct huge_alloc *prev = (void *)((char *)head + h);
                assert(prev->prev == 0);
                prev->prev = offset;
        }
        head->huge = offset;
        ha->prev = 0;
}

static void del_from_huge(struct header *head, struct huge_alloc *ha)
{
        /* Front of list? */
        if (ha->prev == 0) {
                head->huge = ha->next;
        } else {
                struct huge_alloc *prev = (void *)((char *)head + ha->prev);
                prev->next = ha->next;
        }
        if (ha->next != 0) {
                struct huge_alloc *next = (void *)((char *)head + ha->next);
                next->prev = ha->prev;
        }
}

static void add_small_page_to_freelist(struct header *head,
                                       struct page_header *ph,
                                       unsigned int sp_bits)
{
        add_to_list(head, &head->small_free_list, ph, sp_bits);
}

static void add_large_page_to_freelist(struct header *head,
                                       struct page_header *ph,
                                       unsigned int sp_bits)
{
        add_to_list(head, &head->large_free_list, ph, sp_bits);
}

static void add_to_bucket_list(struct header *head,
                               struct bucket_state *bs,
                               struct page_header *ph,
                               unsigned int sp_bits)
{
        add_to_list(head, &bs->page_list, ph, sp_bits);
}

static void del_from_bucket_list(struct header *head,
                                 struct bucket_state *bs,
                                 struct page_header *ph,
                                 unsigned int sp_bits)
{
        del_from_list(head, &bs->page_list, ph, sp_bits);
}

static void del_from_bucket_full_list(struct header *head,
                                      struct bucket_state *bs,
                                      struct page_header *ph,
                                      unsigned int sp_bits)
{
        del_from_list(head, &bs->full_list, ph, sp_bits);
}

static void add_to_bucket_full_list(struct header *head,
                                    struct bucket_state *bs,
                                    struct page_header *ph,
                                    unsigned int sp_bits)
{
        add_to_list(head, &bs->full_list, ph, sp_bits);
}

/* Bit operations over long-based bitmaps.  Note 1UL: shifting a plain int
 * is undefined once off % BITS_PER_LONG reaches 32 on 64-bit longs. */
static void clear_bit(unsigned long bitmap[], unsigned int off)
{
        bitmap[off / BITS_PER_LONG] &= ~(1UL << (off % BITS_PER_LONG));
}

static bool test_bit(const unsigned long bitmap[], unsigned int off)
{
        return bitmap[off / BITS_PER_LONG] & (1UL << (off % BITS_PER_LONG));
}

static void set_bit(unsigned long bitmap[], unsigned int off)
{
        bitmap[off / BITS_PER_LONG] |= (1UL << (off % BITS_PER_LONG));
}

/* There must be a bit to be found. */
static unsigned int find_free_bit(const unsigned long bitmap[])
{
        unsigned int i;

        for (i = 0; bitmap[i] == -1UL; i++);
        return (i*BITS_PER_LONG) + ffsl(~bitmap[i]) - 1;
}

/* How many elements can we fit in a page? */
static unsigned long elements_per_page(unsigned long align_bits,
                                       unsigned long esize,
                                       unsigned long psize)
{
        unsigned long num, overhead;

        /* First approximation: no extra room for bitmap. */
        overhead = align_up(sizeof(struct page_header), 1 << align_bits);
        num = (psize - overhead) / esize;

        while (page_header_size(align_bits, num) + esize * num > psize)
                num--;
        return num;
}
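
/*
 * Worked example (LP64 layout as above): 8-byte elements (align_bits == 3)
 * in a 128-byte small page: the first guess is (128 - 16)/8 = 14, and
 * page_header_size(3, 14) + 8*14 = 16 + 112 = 128 fits exactly, so 14
 * elements per page.
 */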

static bool large_page_bucket(unsigned int bucket, unsigned int sp_bits)
{
        unsigned long max_smallsize;

        /* Note: this doesn't take into account page header. */
        max_smallsize = (1UL << sp_bits) >> MAX_PAGE_OBJECT_ORDER;

        return bucket_to_size(bucket) > max_smallsize;
}

static unsigned int max_bucket(unsigned int lp_bits)
{
        return (lp_bits - MAX_PAGE_OBJECT_ORDER) * INTER_BUCKET_SPACE;
}

void alloc_init(void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        struct page_header *ph;
        unsigned int lp_bits, sp_bits, num_buckets;
        unsigned long header_size, i;

        if (poolsize < MIN_USEFUL_SIZE) {
                tiny_alloc_init(pool, poolsize);
                return;
        }

        /* We rely on page numbers fitting in 16 bits. */
        BUILD_ASSERT(MAX_SMALL_PAGES < 65536);

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);

        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        memset(head, 0, header_size);
        for (i = 0; i < num_buckets; i++) {
                unsigned long pagesize;

                if (large_page_bucket(i, sp_bits))
                        pagesize = 1UL << lp_bits;
                else
                        pagesize = 1UL << sp_bits;

                head->bs[i].elements_per_page
                        = elements_per_page(i / INTER_BUCKET_SPACE,
                                            bucket_to_size(i),
                                            pagesize);
        }

        /* They start as all large pages. */
        memset(head->pagesize, 0xFF, sizeof(head->pagesize));
        /* FIXME: small pages for last bit? */

        /* Split first page into small pages. */
        assert(header_size < (1UL << lp_bits));
        clear_bit(head->pagesize, 0);

        /* Skip over page(s) used by header, add rest to free list. */
        for (i = align_up(header_size, (1 << sp_bits)) >> sp_bits;
             i < SMALL_PAGES_PER_LARGE_PAGE;
             i++) {
                ph = from_pgnum(head, i, sp_bits);
                ph->elements_used = 0;
                add_small_page_to_freelist(head, ph, sp_bits);
        }

        /* Add the rest of the pages as large pages. */
        i = SMALL_PAGES_PER_LARGE_PAGE;
        while ((i << sp_bits) + (1 << lp_bits) <= poolsize) {
                assert(i < MAX_SMALL_PAGES);
                ph = from_pgnum(head, i, sp_bits);
                ph->elements_used = 0;
                add_large_page_to_freelist(head, ph, sp_bits);
                i += SMALL_PAGES_PER_LARGE_PAGE;
        }
}
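
/*
 * Example of intended use (illustrative sketch; any flat buffer works,
 * and all allocator state lives inside it):
 *
 *	char *pool = malloc(1 << 21);	// 2MB >= MIN_USEFUL_SIZE
 *	alloc_init(pool, 1 << 21);
 *	void *p = alloc_get(pool, 1 << 21, 100, ALIGNOF(long));
 *	assert(alloc_check(pool, 1 << 21));
 *	alloc_free(pool, 1 << 21, p);
 */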

/* A large page's worth of small pages is free: delete them from free list. */
static void del_large_from_small_free_list(struct header *head,
                                           struct page_header *ph,
                                           unsigned int sp_bits)
{
        unsigned long i;

        for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                del_from_list(head, &head->small_free_list,
                              (struct page_header *)
                              ((char *)ph + (i << sp_bits)),
                              sp_bits);
        }
}

static bool all_empty(struct header *head,
                      unsigned long pgnum,
                      unsigned sp_bits)
{
        unsigned long i;

        for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                struct page_header *ph = from_pgnum(head, pgnum + i, sp_bits);
                if (ph->elements_used)
                        return false;
        }
        return true;
}

static void recombine_small_pages(struct header *head, unsigned long poolsize,
                                  unsigned int sp_bits)
{
        unsigned long i;
        unsigned int lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        /* Look for small pages to coalesce, after first large page. */
        for (i = SMALL_PAGES_PER_LARGE_PAGE;
             i < (poolsize >> lp_bits) << BITS_FROM_SMALL_TO_LARGE_PAGE;
             i += SMALL_PAGES_PER_LARGE_PAGE) {
                /* Already a large page? */
                if (test_bit(head->pagesize, i / SMALL_PAGES_PER_LARGE_PAGE))
                        continue;
                if (all_empty(head, i, sp_bits)) {
                        struct page_header *ph = from_pgnum(head, i, sp_bits);
                        set_bit(head->pagesize,
                                i / SMALL_PAGES_PER_LARGE_PAGE);
                        del_large_from_small_free_list(head, ph, sp_bits);
                        add_large_page_to_freelist(head, ph, sp_bits);
                }
        }
}

static u16 get_large_page(struct header *head, unsigned long poolsize,
                          unsigned int sp_bits)
{
        unsigned int page;

        page = pop_from_list(head, &head->large_free_list, sp_bits);
        if (likely(page))
                return page;

        recombine_small_pages(head, poolsize, sp_bits);

        return pop_from_list(head, &head->large_free_list, sp_bits);
}

/* Returns small page. */
static unsigned long break_up_large_page(struct header *head,
                                         unsigned int sp_bits,
                                         u16 lpage)
{
        unsigned int i;

        clear_bit(head->pagesize, lpage >> BITS_FROM_SMALL_TO_LARGE_PAGE);

        for (i = 1; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                struct page_header *ph = from_pgnum(head, lpage + i, sp_bits);
                /* Initialize this: huge_alloc reads it. */
                ph->elements_used = 0;
                add_small_page_to_freelist(head, ph, sp_bits);
        }

        return lpage;
}

static u16 get_small_page(struct header *head, unsigned long poolsize,
                          unsigned int sp_bits)
{
        u16 ret;

        ret = pop_from_list(head, &head->small_free_list, sp_bits);
        if (likely(ret))
                return ret;
        ret = get_large_page(head, poolsize, sp_bits);
        if (likely(ret))
                ret = break_up_large_page(head, sp_bits, ret);
        return ret;
}

static bool huge_allocated(struct header *head, unsigned long offset)
{
        unsigned long i;
        struct huge_alloc *ha;

        for (i = head->huge; i; i = ha->next) {
                ha = (void *)((char *)head + i);
                if (ha->off <= offset && ha->off + ha->len > offset)
                        return true;
        }
        return false;
}

/* They want something really big.  Aim for contiguous pages (slow). */
static void *unlikely_func huge_alloc(void *pool, unsigned long poolsize,
                                      unsigned long size, unsigned long align)
{
        struct header *head = pool;
        struct huge_alloc *ha;
        unsigned long i, sp_bits, lp_bits, num, header_size;

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        /* Allocate tracking structure optimistically. */
        ha = alloc_get(pool, poolsize, sizeof(*ha), ALIGNOF(*ha));
        if (!ha)
                return NULL;

        /* First search for contiguous small pages... */
        header_size = sizeof(*head) + sizeof(head->bs) * (max_bucket(lp_bits)-1);

        num = 0;
        for (i = (header_size + (1 << sp_bits) - 1) >> sp_bits;
             i << sp_bits < poolsize;
             i++) {
                struct page_header *pg;
                unsigned long off = (i << sp_bits);

                /* Skip over large pages: they break the contiguous run. */
                if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                        num = 0;
                        i += (1 << BITS_FROM_SMALL_TO_LARGE_PAGE)-1;
                        continue;
                }

                /* Does this page meet alignment requirements? */
                if (!num && off % align != 0)
                        continue;

                /* FIXME: This makes us O(n^2). */
                if (huge_allocated(head, off)) {
                        num = 0;
                        continue;
                }

                pg = (struct page_header *)((char *)head + off);
                if (pg->elements_used) {
                        num = 0;
                        continue;
                }

                num++;
                if (num << sp_bits >= size) {
                        unsigned long pgnum;

                        /* Remove from free list. */
                        for (pgnum = i; pgnum > i - num; pgnum--) {
                                pg = from_pgnum(head, pgnum, sp_bits);
                                del_from_list(head,
                                              &head->small_free_list,
                                              pg, sp_bits);
                        }
                        ha->off = (i - num + 1) << sp_bits;
                        ha->len = num << sp_bits;
                        goto done;
                }
        }

        /* Now search for large pages... */
        recombine_small_pages(head, poolsize, sp_bits);

        num = 0;
        for (i = (header_size + (1 << lp_bits) - 1) >> lp_bits;
             (i << lp_bits) < poolsize; i++) {
                struct page_header *pg;
                unsigned long off = (i << lp_bits);

                /* Ignore small pages: they break the contiguous run. */
                if (!test_bit(head->pagesize, i)) {
                        num = 0;
                        continue;
                }

                /* Does this page meet alignment requirements? */
                if (!num && off % align != 0)
                        continue;

                /* FIXME: This makes us O(n^2). */
                if (huge_allocated(head, off)) {
                        num = 0;
                        continue;
                }

                pg = (struct page_header *)((char *)head + off);
                if (pg->elements_used) {
                        num = 0;
                        continue;
                }

                num++;
                if (num << lp_bits >= size) {
                        unsigned long pgnum;

                        /* Remove from free list. */
                        for (pgnum = i; pgnum > i - num; pgnum--) {
                                pg = from_pgnum(head, pgnum, lp_bits);
                                del_from_list(head,
                                              &head->large_free_list,
                                              pg, sp_bits);
                        }
                        ha->off = (i - num + 1) << lp_bits;
                        ha->len = num << lp_bits;
                        goto done;
                }
        }

        /* Unable to satisfy: free huge alloc structure. */
        alloc_free(pool, poolsize, ha);
        return NULL;

done:
        add_to_huge_list(pool, ha);
        return (char *)pool + ha->off;
}

static void unlikely_func huge_free(struct header *head,
                                    unsigned long poolsize, void *free)
{
        unsigned long i, off, pgnum, free_off = (char *)free - (char *)head;
        unsigned int sp_bits, lp_bits;
        struct huge_alloc *ha;

        for (i = head->huge; i; i = ha->next) {
                ha = (void *)((char *)head + i);
                if (free_off == ha->off)
                        break;
        }
        assert(i);

        /* Free up all the pages, delete and free ha. */
        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
        pgnum = free_off >> sp_bits;

        if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                for (off = ha->off; off < ha->off + ha->len; off += 1 << lp_bits) {
                        add_large_page_to_freelist(head,
                                                   (void *)((char *)head + off),
                                                   sp_bits);
                }
        } else {
                for (off = ha->off; off < ha->off + ha->len; off += 1 << sp_bits) {
                        add_small_page_to_freelist(head,
                                                   (void *)((char *)head + off),
                                                   sp_bits);
                }
        }
        del_from_huge(head, ha);
        alloc_free(head, poolsize, ha);
}

static unsigned long unlikely_func huge_size(struct header *head, void *p)
{
        unsigned long i, off = (char *)p - (char *)head;
        struct huge_alloc *ha;

        for (i = head->huge; i; i = ha->next) {
                ha = (void *)((char *)head + i);
                if (off == ha->off) {
                        return ha->len;
                }
        }
        abort();
}

void *alloc_get(void *pool, unsigned long poolsize,
                unsigned long size, unsigned long align)
{
        struct header *head = pool;
        unsigned int bucket;
        unsigned long i;
        struct bucket_state *bs;
        struct page_header *ph;
        unsigned int sp_bits;

        if (poolsize < MIN_USEFUL_SIZE) {
                return tiny_alloc_get(pool, poolsize, size, align);
        }

        size = align_up(size, align);
        if (unlikely(!size))
                size = 1;
        bucket = size_to_bucket(size);

        sp_bits = small_page_bits(poolsize);

        if (bucket >= max_bucket(sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                return huge_alloc(pool, poolsize, size, align);
        }

        bs = &head->bs[bucket];

        if (!bs->page_list) {
                if (large_page_bucket(bucket, sp_bits))
                        bs->page_list = get_large_page(head, poolsize,
                                                       sp_bits);
                else
                        bs->page_list = get_small_page(head, poolsize,
                                                       sp_bits);
                /* FIXME: Try large-aligned alloc?  Header stuffing? */
                if (unlikely(!bs->page_list))
                        return NULL;
                ph = from_pgnum(head, bs->page_list, sp_bits);
                ph->bucket = bucket;
                ph->elements_used = 0;
                ph->next = 0;
                memset(ph->used, 0, used_size(bs->elements_per_page));
        }

        ph = from_pgnum(head, bs->page_list, sp_bits);

        i = find_free_bit(ph->used);
        set_bit(ph->used, i);
        ph->elements_used++;

        /* Check whether this page is now full. */
        if (unlikely(ph->elements_used == bs->elements_per_page)) {
                del_from_bucket_list(head, bs, ph, sp_bits);
                add_to_bucket_full_list(head, bs, ph, sp_bits);
        }

        return (char *)ph + page_header_size(ph->bucket / INTER_BUCKET_SPACE,
                                             bs->elements_per_page)
               + i * bucket_to_size(bucket);
}

void alloc_free(void *pool, unsigned long poolsize, void *free)
{
        struct header *head = pool;
        struct bucket_state *bs;
        unsigned int sp_bits;
        unsigned long i, pgnum, pgoffset, offset = (char *)free - (char *)pool;
        bool smallpage;
        struct page_header *ph;

        if (poolsize < MIN_USEFUL_SIZE) {
                return tiny_alloc_free(pool, poolsize, free);
        }

        /* Get page header. */
        sp_bits = small_page_bits(poolsize);
        pgnum = offset >> sp_bits;

        /* Big page? Round down further. */
        if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                smallpage = false;
                pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);
        } else
                smallpage = true;

        /* Step back to page header. */
        ph = from_pgnum(head, pgnum, sp_bits);
        if ((void *)ph == free) {
                huge_free(head, poolsize, free);
                return;
        }

        bs = &head->bs[ph->bucket];
        pgoffset = offset - (pgnum << sp_bits)
                - page_header_size(ph->bucket / INTER_BUCKET_SPACE,
                                   bs->elements_per_page);

        /* A fully-used page moves back to the partial list. */
        if (unlikely(ph->elements_used == bs->elements_per_page)) {
                del_from_bucket_full_list(head, bs, ph, sp_bits);
                add_to_bucket_list(head, bs, ph, sp_bits);
        }

        /* Which element are we? */
        i = pgoffset / bucket_to_size(ph->bucket);
        clear_bit(ph->used, i);
        ph->elements_used--;

        if (unlikely(ph->elements_used == 0)) {
                del_from_bucket_list(head, bs, ph, sp_bits);
                if (smallpage)
                        add_small_page_to_freelist(head, ph, sp_bits);
                else
                        add_large_page_to_freelist(head, ph, sp_bits);
        }
}

unsigned long alloc_size(void *pool, unsigned long poolsize, void *p)
{
        struct header *head = pool;
        unsigned int pgnum, sp_bits;
        unsigned long offset = (char *)p - (char *)pool;
        struct page_header *ph;

        if (poolsize < MIN_USEFUL_SIZE)
                return tiny_alloc_size(pool, poolsize, p);

        /* Get page header. */
        sp_bits = small_page_bits(poolsize);
        pgnum = offset >> sp_bits;

        /* Big page? Round down further. */
        if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);

        /* Step back to page header. */
        ph = from_pgnum(head, pgnum, sp_bits);
        if ((void *)ph == p)
                return huge_size(head, p);

        return bucket_to_size(ph->bucket);
}

/* Useful for gdb breakpoints. */
static bool check_fail(void)
{
        return false;
}

static unsigned long count_bits(const unsigned long bitmap[],
                                unsigned long limit)
{
        unsigned long i, count = 0;

        while (limit >= BITS_PER_LONG) {
                count += popcount(bitmap[0]);
                bitmap++;
                limit -= BITS_PER_LONG;
        }

        for (i = 0; i < limit; i++)
                if (test_bit(bitmap, i))
                        count++;
        return count;
}

static bool out_of_bounds(unsigned long pgnum,
                          unsigned int sp_bits,
                          unsigned long pagesize,
                          unsigned long poolsize)
{
        if (((pgnum << sp_bits) >> sp_bits) != pgnum)
                return true;

        if ((pgnum << sp_bits) > poolsize)
                return true;

        return ((pgnum << sp_bits) + pagesize > poolsize);
}

static bool check_bucket(struct header *head,
                         unsigned long poolsize,
                         unsigned long pages[],
                         struct bucket_state *bs,
                         unsigned int bindex)
{
        bool lp_bucket;
        struct page_header *ph;
        unsigned long taken, i, prev, pagesize, sp_bits, lp_bits;

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        lp_bucket = large_page_bucket(bindex, sp_bits);

        pagesize = 1UL << (lp_bucket ? lp_bits : sp_bits);

        /* This many elements fit? */
        taken = page_header_size(bindex / INTER_BUCKET_SPACE,
                                 bs->elements_per_page);
        taken += bucket_to_size(bindex) * bs->elements_per_page;
        if (taken > pagesize)
                return check_fail();

        /* One more wouldn't fit? */
        taken = page_header_size(bindex / INTER_BUCKET_SPACE,
                                 bs->elements_per_page + 1);
        taken += bucket_to_size(bindex) * (bs->elements_per_page + 1);
        if (taken <= pagesize)
                return check_fail();

        /* Walk used list. */
        prev = 0;
        for (i = bs->page_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, pagesize, poolsize))
                        return check_fail();
                /* Wrong size page? */
                if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
                    != lp_bucket)
                        return check_fail();
                /* Large page not on boundary? */
                if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                /* Empty or full? */
                if (ph->elements_used == 0)
                        return check_fail();
                if (ph->elements_used >= bs->elements_per_page)
                        return check_fail();
                /* Used bits don't agree? */
                if (ph->elements_used != count_bits(ph->used,
                                                    bs->elements_per_page))
                        return check_fail();
                /* Wrong bucket? */
                if (ph->bucket != bindex)
                        return check_fail();
                prev = i;
        }

        /* Walk full list. */
        prev = 0;
        for (i = bs->full_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, pagesize, poolsize))
                        return check_fail();
                /* Wrong size page? */
                if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
                    != lp_bucket)
                        return check_fail();
                /* Large page not on boundary? */
                if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                /* Not full? */
                if (ph->elements_used != bs->elements_per_page)
                        return check_fail();
                /* Used bits don't agree? */
                if (ph->elements_used != count_bits(ph->used,
                                                    bs->elements_per_page))
                        return check_fail();
                /* Wrong bucket? */
                if (ph->bucket != bindex)
                        return check_fail();
                prev = i;
        }
        return true;
}

bool alloc_check(void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        unsigned long prev, i, lp_bits, sp_bits, header_size, num_buckets;
        struct page_header *ph;
        struct huge_alloc *ha;
        unsigned long pages[MAX_SMALL_PAGES / BITS_PER_LONG] = { 0 };

        if (poolsize < MIN_USEFUL_SIZE)
                return tiny_alloc_check(pool, poolsize);

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);

        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        /* First, set all bits taken by header. */
        for (i = 0; i < header_size; i += (1UL << sp_bits))
                set_bit(pages, i >> sp_bits);

        /* Check small page free list. */
        prev = 0;
        for (i = head->small_free_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, 1 << sp_bits, poolsize))
                        return check_fail();
                /* Large page? */
                if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                prev = i;
        }

        /* Check large page free list. */
        prev = 0;
        for (i = head->large_free_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, 1 << lp_bits, poolsize))
                        return check_fail();
                /* Not large page? */
                if (!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                        return check_fail();
                /* Not page boundary? */
                if ((i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                prev = i;
        }

        /* Check the buckets. */
        for (i = 0; i < max_bucket(lp_bits); i++) {
                struct bucket_state *bs = &head->bs[i];

                if (!check_bucket(head, poolsize, pages, bs, i))
                        return false;
        }

        /* Check the huge alloc list. */
        prev = 0;
        for (i = head->huge; i; i = ha->next) {
                unsigned long pgbits, j;

                /* Bad pointer? */
                if (i >= poolsize || i + sizeof(*ha) > poolsize)
                        return check_fail();
                ha = (void *)((char *)head + i);

                /* Check contents of ha. */
                if (ha->off > poolsize || ha->off + ha->len > poolsize)
                        return check_fail();

                /* Large or small page? */
                pgbits = test_bit(head->pagesize, ha->off >> lp_bits)
                        ? lp_bits : sp_bits;

                /* Not page boundary? */
                if ((ha->off % (1UL << pgbits)) != 0)
                        return check_fail();

                /* Not page length? */
                if ((ha->len % (1UL << pgbits)) != 0)
                        return check_fail();

                /* Linked list corrupt? */
                if (ha->prev != prev)
                        return check_fail();

                for (j = ha->off; j < ha->off + ha->len; j += (1 << sp_bits)) {
                        /* Already seen this page? */
                        if (test_bit(pages, j >> sp_bits))
                                return check_fail();
                        set_bit(pages, j >> sp_bits);
                }

                prev = i;
        }

        /* Make sure every page is accounted for. */
        for (i = 0; i < poolsize >> sp_bits; i++) {
                if (!test_bit(pages, i))
                        return check_fail();
                if (test_bit(head->pagesize,
                             i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                        /* Large page, skip rest. */
                        i += SMALL_PAGES_PER_LARGE_PAGE - 1;
                }
        }

        return true;
}

static unsigned long print_overhead(FILE *out, const char *desc,
                                    unsigned long bytes,
                                    unsigned long poolsize)
{
        fprintf(out, "Overhead (%s): %lu bytes (%.3g%%)\n",
                desc, bytes, 100.0 * bytes / poolsize);
        return bytes;
}

static unsigned long count_list(struct header *head,
                                u16 pgnum,
                                unsigned int sp_bits,
                                unsigned long *total_elems)
{
        struct page_header *p;
        unsigned long ret = 0;

        while (pgnum) {
                p = from_pgnum(head, pgnum, sp_bits);
                if (total_elems)
                        (*total_elems) += p->elements_used;
                ret++;
                pgnum = p->next;
        }
        return ret;
}

static unsigned long visualize_bucket(FILE *out, struct header *head,
                                      unsigned int bucket,
                                      unsigned long poolsize,
                                      unsigned int sp_bits)
{
        unsigned long num_full, num_partial, num_pages, page_size,
                elems, hdr_min, hdr_size, elems_per_page, overhead = 0;

        elems_per_page = head->bs[bucket].elements_per_page;

        /* If we used byte-based bitmaps, we could get the page header
         * down to: */
        hdr_min = sizeof(struct page_header)
                - sizeof(((struct page_header *)0)->used)
                + align_up(elems_per_page, CHAR_BIT) / CHAR_BIT;
        hdr_size = page_header_size(bucket / INTER_BUCKET_SPACE,
                                    elems_per_page);

        elems = 0;
        num_full = count_list(head, head->bs[bucket].full_list, sp_bits,
                              &elems);
        num_partial = count_list(head, head->bs[bucket].page_list, sp_bits,
                                 &elems);
        num_pages = num_full + num_partial;
        if (!num_pages)
                return 0;

        fprintf(out, "Bucket %u (%lu bytes):"
                " %lu full, %lu partial = %lu elements\n",
                bucket, bucket_to_size(bucket), num_full, num_partial, elems);
        /* Strict requirement of page header size. */
        overhead += print_overhead(out, "page headers",
                                   hdr_min * num_pages, poolsize);
        /* Gap between minimal page header and actual start. */
        overhead += print_overhead(out, "page post-header alignments",
                                   (hdr_size - hdr_min) * num_pages, poolsize);
        /* Between last element and end of page. */
        page_size = (1 << sp_bits);
        if (large_page_bucket(bucket, sp_bits))
                page_size <<= BITS_FROM_SMALL_TO_LARGE_PAGE;

        overhead += print_overhead(out, "page tails",
                                   (page_size - (hdr_size
                                                 + (elems_per_page
                                                    * bucket_to_size(bucket))))
                                   * num_pages, poolsize);
        return overhead;
}

void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        unsigned long i, lp_bits, sp_bits, header_size, num_buckets, count,
                overhead = 0;

        fprintf(out, "Pool %p size %lu: (%s allocator)\n", pool, poolsize,
                poolsize < MIN_USEFUL_SIZE ? "tiny" : "standard");

        if (poolsize < MIN_USEFUL_SIZE) {
                tiny_alloc_visualize(out, pool, poolsize);
                return;
        }

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);
        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        fprintf(out, "Large page size %lu, small page size %lu.\n",
                1UL << lp_bits, 1UL << sp_bits);
        overhead += print_overhead(out, "unused pool tail",
                                   poolsize % (1 << lp_bits), poolsize);
        fprintf(out, "Main header %lu bytes (%lu small pages).\n",
                header_size, align_up(header_size, 1 << sp_bits) >> sp_bits);
        overhead += print_overhead(out, "partial header page",
                                   align_up(header_size, 1 << sp_bits)
                                   - header_size, poolsize);
        /* Total large pages. */
        i = count_bits(head->pagesize, poolsize >> lp_bits);
        /* Used pages. */
        count = i - count_list(head, head->large_free_list, sp_bits, NULL);
        fprintf(out, "%lu/%lu large pages used (%.3g%%)\n",
                count, i, count ? 100.0 * count / i : 0.0);

        /* Total small pages. */
        i = ((poolsize >> lp_bits) - i) << BITS_FROM_SMALL_TO_LARGE_PAGE;
        /* Used pages. */
        count = i - count_list(head, head->small_free_list, sp_bits, NULL);
        fprintf(out, "%lu/%lu small pages used (%.3g%%)\n",
                count, i, count ? 100.0 * count / i : 0.0);

        /* Summary of each bucket. */
        fprintf(out, "%lu buckets:\n", num_buckets);
        for (i = 0; i < num_buckets; i++)
                overhead += visualize_bucket(out, head, i, poolsize, sp_bits);

        print_overhead(out, "total", overhead, poolsize);
}