#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include <stdlib.h>
#include "alloc.h"
#include "bitops.h"
#include "tiny.h"
#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>
#include <ccan/alignof/alignof.h>
#include <ccan/short_types/short_types.h>
#include <ccan/compiler/compiler.h>
#include "config.h"

/*
   Inspired by (and parts taken from) Andrew Tridgell's alloc_mmap:
   http://samba.org/~tridge/junkcode/alloc_mmap/

   Copyright (C) Andrew Tridgell 2007

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/* We divide the pool into this many large pages (nearest power of 2) */
#define MAX_LARGE_PAGES (256UL)

/* 32 small pages == 1 large page. */
#define BITS_FROM_SMALL_TO_LARGE_PAGE 5

#define MAX_SMALL_PAGES (MAX_LARGE_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)

/* Smallest pool size for this scheme: 8192 128-byte small pages == 1MB.
 * That's 9%/13% overhead for 32/64 bit. */
#define MIN_USEFUL_SIZE (MAX_SMALL_PAGES * 128)

/* Every 4 buckets, we jump up a power of 2. ...8 10 12 14 16 20 24 28 32... */
#define INTER_BUCKET_SPACE 4

#define SMALL_PAGES_PER_LARGE_PAGE (1 << BITS_FROM_SMALL_TO_LARGE_PAGE)

/* FIXME: Figure this out properly. */
#define MAX_SIZE (1 << 30)

/* How few objects fit in a page before we use a larger one? (8) */
#define MAX_PAGE_OBJECT_ORDER   3

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

struct bucket_state {
        u32 elements_per_page;
        u16 page_list;
        u16 full_list;
};

struct header {
        /* Bitmap of which pages are large. */
        unsigned long pagesize[MAX_LARGE_PAGES / BITS_PER_LONG];

        /* List of unused small/large pages. */
        u16 small_free_list;
        u16 large_free_list;

        /* List of huge allocs. */
        unsigned long huge;

        /* This is less defined: we have four (INTER_BUCKET_SPACE) buckets
         * for each power of 2 */
        struct bucket_state bs[1];
};

struct huge_alloc {
        unsigned long next, prev;
        unsigned long off, len;
};

struct page_header {
        u16 next, prev;
        /* FIXME: We can just count all-0 and all-1 used[] elements. */
        unsigned elements_used : 25;
        unsigned bucket : 7;
        unsigned long used[1]; /* One bit per element. */
};

/*
 * Every 4 buckets, the size doubles.
 * Between buckets, sizes increase linearly.
 *
 * eg. bucket 40 = 2^10                 = 1024
 *     bucket 41 = 2^10 + 1*2^10/4      = 1024 + 256
 *     bucket 42 = 2^10 + 2*2^10/4      = 1024 + 512
 *     bucket 43 = 2^10 + 3*2^10/4      = 1024 + 768
 *     bucket 44 = 2^11                 = 2048
 *
 * Care is taken to handle low numbered buckets, at cost of overflow.
 */
static unsigned long bucket_to_size(unsigned int bucket)
{
        unsigned long base = 1UL << (bucket / INTER_BUCKET_SPACE);
        return base + ((bucket % INTER_BUCKET_SPACE)
                       << (bucket / INTER_BUCKET_SPACE))
                / INTER_BUCKET_SPACE;
}

/*
 * Say size is 10.
 *   fls(size/2) == 3.  1 << 3 == 8, so we're 2 too large, out of a possible
 * 8 too large.  That's 1/4 of the way to the next power of 2 == 1 bucket.
 *
 * We make sure we round up.  Note that this fails on 32 bit at size
 * 1879048193 (around bucket 120).
 */
static unsigned int size_to_bucket(unsigned long size)
{
        unsigned int base = afls(size/2);
        unsigned long overshoot;

        overshoot = size - (1UL << base);
        return base * INTER_BUCKET_SPACE
                + ((overshoot * INTER_BUCKET_SPACE + (1UL << base)-1) >> base);
}
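
/*
 * Worked example (a quick sanity check of the two functions above): for
 * size 10, afls(10/2) == 3, so base == 3 (2^3 == 8) and overshoot == 2.
 * That gives bucket 3*4 + ((2*4 + 7) >> 3) == 13, and bucket_to_size(13)
 * == 8 + (1 << 3)/4 == 10, so the round trip is exact for this size.
 */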

static unsigned int small_page_bits(unsigned long poolsize)
{
        return afls(poolsize / MAX_SMALL_PAGES - 1);
}

static struct page_header *from_pgnum(struct header *head,
                                      unsigned long pgnum,
                                      unsigned sp_bits)
{
        return (struct page_header *)((char *)head + (pgnum << sp_bits));
}

static u16 to_pgnum(struct header *head, void *p, unsigned sp_bits)
{
        return ((char *)p - (char *)head) >> sp_bits;
}

static size_t used_size(unsigned int num_elements)
{
        return align_up(num_elements, BITS_PER_LONG) / CHAR_BIT;
}

/*
 * We always align the first entry to the lower power of 2.
 * eg. the 12-byte bucket gets 8-byte aligned.  The 4096-byte bucket
 * gets 4096-byte aligned.
 */
static unsigned long page_header_size(unsigned int align_bits,
                                      unsigned long num_elements)
{
        unsigned long size;

        size = sizeof(struct page_header)
                - sizeof(((struct page_header *)0)->used)
                + used_size(num_elements);
        return align_up(size, 1UL << align_bits);
}
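
/*
 * Worked example (figures assume a typical 64-bit ABI where
 * sizeof(struct page_header) == 16): the 12-byte bucket has align_bits
 * == 3, so for 9 elements the header is 16 - 8 + used_size(9) == 16
 * bytes, which is already 8-byte aligned.
 */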

static void add_to_list(struct header *head,
                        u16 *list, struct page_header *ph, unsigned sp_bits)
{
        unsigned long h = *list, offset = to_pgnum(head, ph, sp_bits);

        ph->next = h;
        if (h) {
                struct page_header *prev = from_pgnum(head, h, sp_bits);
                assert(prev->prev == 0);
                prev->prev = offset;
        }
        *list = offset;
        ph->prev = 0;
}

static void del_from_list(struct header *head,
                          u16 *list, struct page_header *ph, unsigned sp_bits)
{
        /* Front of list? */
        if (ph->prev == 0) {
                *list = ph->next;
        } else {
                struct page_header *prev = from_pgnum(head, ph->prev, sp_bits);
                prev->next = ph->next;
        }
        if (ph->next != 0) {
                struct page_header *next = from_pgnum(head, ph->next, sp_bits);
                next->prev = ph->prev;
        }
}

static u16 pop_from_list(struct header *head,
                                   u16 *list,
                                   unsigned int sp_bits)
{
        u16 h = *list;
        struct page_header *ph = from_pgnum(head, h, sp_bits);

        if (likely(h)) {
                *list = ph->next;
                if (*list)
                        from_pgnum(head, *list, sp_bits)->prev = 0;
        }
        return h;
}

static void add_to_huge_list(struct header *head, struct huge_alloc *ha)
{
        unsigned long h = head->huge;
        unsigned long offset = (char *)ha - (char *)head;

        ha->next = h;
        if (h) {
                struct huge_alloc *prev = (void *)((char *)head + h);
                assert(prev->prev == 0);
                prev->prev = offset;
        }
        head->huge = offset;
        ha->prev = 0;
}

static void del_from_huge(struct header *head, struct huge_alloc *ha)
{
        /* Front of list? */
        if (ha->prev == 0) {
                head->huge = ha->next;
        } else {
                struct huge_alloc *prev = (void *)((char *)head + ha->prev);
                prev->next = ha->next;
        }
        if (ha->next != 0) {
                struct huge_alloc *next = (void *)((char *)head + ha->next);
                next->prev = ha->prev;
        }
}

static void add_small_page_to_freelist(struct header *head,
                                       struct page_header *ph,
                                       unsigned int sp_bits)
{
        add_to_list(head, &head->small_free_list, ph, sp_bits);
}

static void add_large_page_to_freelist(struct header *head,
                                       struct page_header *ph,
                                       unsigned int sp_bits)
{
        add_to_list(head, &head->large_free_list, ph, sp_bits);
}

static void add_to_bucket_list(struct header *head,
                               struct bucket_state *bs,
                               struct page_header *ph,
                               unsigned int sp_bits)
{
        add_to_list(head, &bs->page_list, ph, sp_bits);
}

static void del_from_bucket_list(struct header *head,
                                 struct bucket_state *bs,
                                 struct page_header *ph,
                                 unsigned int sp_bits)
{
        del_from_list(head, &bs->page_list, ph, sp_bits);
}

static void del_from_bucket_full_list(struct header *head,
                                      struct bucket_state *bs,
                                      struct page_header *ph,
                                      unsigned int sp_bits)
{
        del_from_list(head, &bs->full_list, ph, sp_bits);
}

static void add_to_bucket_full_list(struct header *head,
                                    struct bucket_state *bs,
                                    struct page_header *ph,
                                    unsigned int sp_bits)
{
        add_to_list(head, &bs->full_list, ph, sp_bits);
}

static void clear_bit(unsigned long bitmap[], unsigned int off)
{
        bitmap[off / BITS_PER_LONG] &= ~(1UL << (off % BITS_PER_LONG));
}

static bool test_bit(const unsigned long bitmap[], unsigned int off)
{
        return bitmap[off / BITS_PER_LONG] & (1UL << (off % BITS_PER_LONG));
}

static void set_bit(unsigned long bitmap[], unsigned int off)
{
        bitmap[off / BITS_PER_LONG] |= (1UL << (off % BITS_PER_LONG));
}

/* There must be a bit to be found. */
static unsigned int find_free_bit(const unsigned long bitmap[])
{
        unsigned int i;

        for (i = 0; bitmap[i] == -1UL; i++);
        return (i*BITS_PER_LONG) + affsl(~bitmap[i]) - 1;
}
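
/*
 * Worked example: if bitmap[0] == 0xFUL (elements 0-3 taken), then
 * ~bitmap[0] has bit 4 as its lowest set bit, affsl() returns 5, and we
 * hand out element 4.
 */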

/* How many elements can we fit in a page? */
static unsigned long elements_per_page(unsigned long align_bits,
                                       unsigned long esize,
                                       unsigned long psize)
{
        unsigned long num, overhead;

        /* First approximation: no extra room for bitmap. */
        overhead = align_up(sizeof(struct page_header), 1UL << align_bits);
        num = (psize - overhead) / esize;

        while (page_header_size(align_bits, num) + esize * num > psize)
                num--;
        return num;
}
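
/*
 * Worked example, continuing the 64-bit figures above: 12-byte elements
 * with 8-byte alignment in a 128-byte small page.  The first guess is
 * (128 - 16)/12 == 9, and page_header_size(3, 9) + 12*9 == 16 + 108 ==
 * 124 <= 128, so the loop never runs and we fit 9 elements.
 */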

static bool large_page_bucket(unsigned int bucket, unsigned int sp_bits)
{
        unsigned long max_smallsize;

        /* Note: this doesn't take into account page header. */
        max_smallsize = (1UL << sp_bits) >> MAX_PAGE_OBJECT_ORDER;

        return bucket_to_size(bucket) > max_smallsize;
}

static unsigned int max_bucket(unsigned int lp_bits)
{
        return (lp_bits - MAX_PAGE_OBJECT_ORDER) * INTER_BUCKET_SPACE;
}

void alloc_init(void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        struct page_header *ph;
        unsigned int lp_bits, sp_bits, num_buckets;
        unsigned long header_size, i;

        if (poolsize < MIN_USEFUL_SIZE) {
                tiny_alloc_init(pool, poolsize);
                return;
        }

        /* We rely on page numbers fitting in 16 bits. */
        BUILD_ASSERT(MAX_SMALL_PAGES < 65536);

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);

        head = pool;
        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        memset(head, 0, header_size);
        for (i = 0; i < num_buckets; i++) {
                unsigned long pagesize;

                if (large_page_bucket(i, sp_bits))
                        pagesize = 1UL << lp_bits;
                else
                        pagesize = 1UL << sp_bits;

                head->bs[i].elements_per_page
                        = elements_per_page(i / INTER_BUCKET_SPACE,
                                            bucket_to_size(i),
                                            pagesize);
        }

        /* They start as all large pages. */
        memset(head->pagesize, 0xFF, sizeof(head->pagesize));
        /* FIXME: small pages for last bit? */

        /* Split first page into small pages. */
        assert(header_size < (1UL << lp_bits));
        clear_bit(head->pagesize, 0);

        /* Skip over page(s) used by header, add rest to free list */
        for (i = align_up(header_size, (1UL << sp_bits)) >> sp_bits;
             i < SMALL_PAGES_PER_LARGE_PAGE;
             i++) {
                ph = from_pgnum(head, i, sp_bits);
                ph->elements_used = 0;
                add_small_page_to_freelist(head, ph, sp_bits);
        }

        /* Add the rest of the pages as large pages. */
        i = SMALL_PAGES_PER_LARGE_PAGE;
        while ((i << sp_bits) + (1UL << lp_bits) <= poolsize) {
                assert(i < MAX_SMALL_PAGES);
                ph = from_pgnum(head, i, sp_bits);
                ph->elements_used = 0;
                add_large_page_to_freelist(head, ph, sp_bits);
                i += SMALL_PAGES_PER_LARGE_PAGE;
        }
}
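
#if 0
/*
 * A minimal usage sketch of the public API defined in this file, kept
 * compiled out; the function name below is just this example's.  It
 * assumes the caller supplies a plain malloc'd region of at least
 * MIN_USEFUL_SIZE bytes; a real caller might mmap the pool instead.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <ccan/alloc/alloc.h>

static int example(void)
{
        unsigned long poolsize = 1024 * 1024; /* == MIN_USEFUL_SIZE */
        void *pool = malloc(poolsize);
        char *p;

        /* Set up the pool metadata, then grab one 16-aligned object. */
        alloc_init(pool, poolsize);
        p = alloc_get(pool, poolsize, 100, 16);
        if (!p)
                return 1;
        printf("Got a %lu-byte object at %p\n",
               alloc_size(pool, poolsize, p), (void *)p);
        alloc_free(pool, poolsize, p);
        assert(alloc_check(pool, poolsize));
        free(pool);
        return 0;
}
#endif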

/* A large page's worth of small pages is free: delete them from the free list. */
static void del_large_from_small_free_list(struct header *head,
                                           struct page_header *ph,
                                           unsigned int sp_bits)
{
        unsigned long i;

        for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                del_from_list(head, &head->small_free_list,
                              (struct page_header *)((char *)ph
                                                     + (i << sp_bits)),
                              sp_bits);
        }
}

static bool all_empty(struct header *head,
                      unsigned long pgnum,
                      unsigned sp_bits)
{
        unsigned long i;

        for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                struct page_header *ph = from_pgnum(head, pgnum + i, sp_bits);
                if (ph->elements_used)
                        return false;
        }
        return true;
}

static void recombine_small_pages(struct header *head, unsigned long poolsize,
                                  unsigned int sp_bits)
{
        unsigned long i;
        unsigned int lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        /* Look for small pages to coalesce, after first large page. */
        for (i = SMALL_PAGES_PER_LARGE_PAGE;
             i < (poolsize >> lp_bits) << BITS_FROM_SMALL_TO_LARGE_PAGE;
             i += SMALL_PAGES_PER_LARGE_PAGE) {
                /* Already a large page? */
                if (test_bit(head->pagesize, i / SMALL_PAGES_PER_LARGE_PAGE))
                        continue;
                if (all_empty(head, i, sp_bits)) {
                        struct page_header *ph = from_pgnum(head, i, sp_bits);
                        set_bit(head->pagesize,
                                i / SMALL_PAGES_PER_LARGE_PAGE);
                        del_large_from_small_free_list(head, ph, sp_bits);
                        add_large_page_to_freelist(head, ph, sp_bits);
                }
        }
}

static u16 get_large_page(struct header *head, unsigned long poolsize,
                          unsigned int sp_bits)
{
        unsigned int page;

        page = pop_from_list(head, &head->large_free_list, sp_bits);
        if (likely(page))
                return page;

        recombine_small_pages(head, poolsize, sp_bits);

        return pop_from_list(head, &head->large_free_list, sp_bits);
}

/* Returns small page. */
static unsigned long break_up_large_page(struct header *head,
                                         unsigned int sp_bits,
                                         u16 lpage)
{
        unsigned int i;

        clear_bit(head->pagesize, lpage >> BITS_FROM_SMALL_TO_LARGE_PAGE);

        for (i = 1; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                struct page_header *ph = from_pgnum(head, lpage + i, sp_bits);
                /* Initialize this: huge_alloc reads it. */
                ph->elements_used = 0;
                add_small_page_to_freelist(head, ph, sp_bits);
        }

        return lpage;
}

static u16 get_small_page(struct header *head, unsigned long poolsize,
                          unsigned int sp_bits)
{
        u16 ret;

        ret = pop_from_list(head, &head->small_free_list, sp_bits);
        if (likely(ret))
                return ret;
        ret = get_large_page(head, poolsize, sp_bits);
        if (likely(ret))
                ret = break_up_large_page(head, sp_bits, ret);
        return ret;
}

static bool huge_allocated(struct header *head, unsigned long offset)
{
        unsigned long i;
        struct huge_alloc *ha;

        for (i = head->huge; i; i = ha->next) {
                ha = (void *)((char *)head + i);
                if (ha->off <= offset && ha->off + ha->len > offset)
                        return true;
        }
        return false;
}

/* They want something really big.  Aim for contiguous pages (slow). */
static COLD void *huge_alloc(void *pool, unsigned long poolsize,
                             unsigned long size, unsigned long align)
{
        struct header *head = pool;
        struct huge_alloc *ha;
        unsigned long i, sp_bits, lp_bits, num, header_size;

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        /* Allocate tracking structure optimistically. */
        ha = alloc_get(pool, poolsize, sizeof(*ha), ALIGNOF(*ha));
        if (!ha)
                return NULL;

        /* First search for contiguous small pages... */
        header_size = sizeof(*head) + sizeof(head->bs) * (max_bucket(lp_bits)-1);

        num = 0;
        for (i = (header_size + (1UL << sp_bits) - 1) >> sp_bits;
             i << sp_bits < poolsize;
             i++) {
                struct page_header *pg;
                unsigned long off = (i << sp_bits);

                /* Skip over large pages. */
                if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                        i += (1UL << BITS_FROM_SMALL_TO_LARGE_PAGE)-1;
                        continue;
                }

                /* Does this page meet alignment requirements? */
                if (!num && off % align != 0)
                        continue;

                /* FIXME: This makes us O(n^2). */
                if (huge_allocated(head, off)) {
                        num = 0;
                        continue;
                }

                pg = (struct page_header *)((char *)head + off);
                if (pg->elements_used) {
                        num = 0;
                        continue;
                }

                num++;
                if (num << sp_bits >= size) {
                        unsigned long pgnum;

                        /* Remove from free list. */
                        for (pgnum = i; pgnum > i - num; pgnum--) {
                                pg = from_pgnum(head, pgnum, sp_bits);
                                del_from_list(head,
                                              &head->small_free_list,
                                              pg, sp_bits);
                        }
                        ha->off = (i - num + 1) << sp_bits;
                        ha->len = num << sp_bits;
                        goto done;
                }
        }

        /* Now search for large pages... */
        recombine_small_pages(head, poolsize, sp_bits);

        num = 0;
        for (i = (header_size + (1UL << lp_bits) - 1) >> lp_bits;
             (i << lp_bits) < poolsize; i++) {
                struct page_header *pg;
                unsigned long off = (i << lp_bits);

                /* Ignore small pages. */
                if (!test_bit(head->pagesize, i))
                        continue;

                /* Does this page meet alignment requirements? */
                if (!num && off % align != 0)
                        continue;

                /* FIXME: This makes us O(n^2). */
                if (huge_allocated(head, off)) {
                        num = 0;
                        continue;
                }

                pg = (struct page_header *)((char *)head + off);
                if (pg->elements_used) {
                        num = 0;
                        continue;
                }

                num++;
                if (num << lp_bits >= size) {
                        unsigned long pgnum;

                        /* Remove from free list. */
                        for (pgnum = i; pgnum > i - num; pgnum--) {
                                pg = from_pgnum(head, pgnum, lp_bits);
                                del_from_list(head,
                                              &head->large_free_list,
                                              pg, sp_bits);
                        }
                        ha->off = (i - num + 1) << lp_bits;
                        ha->len = num << lp_bits;
                        goto done;
                }
        }

        /* Unable to satisfy: free huge alloc structure. */
        alloc_free(pool, poolsize, ha);
        return NULL;

done:
        add_to_huge_list(pool, ha);
        return (char *)pool + ha->off;
}

static COLD void
huge_free(struct header *head, unsigned long poolsize, void *free)
{
        unsigned long i, off, pgnum, free_off = (char *)free - (char *)head;
        unsigned int sp_bits, lp_bits;
        struct huge_alloc *ha;

        for (i = head->huge; i; i = ha->next) {
                ha = (void *)((char *)head + i);
                if (free_off == ha->off)
                        break;
        }
        assert(i);

        /* Free up all the pages, delete and free ha */
        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
        pgnum = free_off >> sp_bits;

        if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                for (off = ha->off;
                     off < ha->off + ha->len;
                     off += 1UL << lp_bits) {
                        add_large_page_to_freelist(head,
                                                   (void *)((char *)head + off),
                                                   sp_bits);
                }
        } else {
                for (off = ha->off;
                     off < ha->off + ha->len;
                     off += 1UL << sp_bits) {
                        add_small_page_to_freelist(head,
                                                   (void *)((char *)head + off),
                                                   sp_bits);
                }
        }
        del_from_huge(head, ha);
        alloc_free(head, poolsize, ha);
}

static COLD unsigned long huge_size(struct header *head, void *p)
{
        unsigned long i, off = (char *)p - (char *)head;
        struct huge_alloc *ha;

        for (i = head->huge; i; i = ha->next) {
                ha = (void *)((char *)head + i);
                if (off == ha->off) {
                        return ha->len;
                }
        }
        abort();
}

void *alloc_get(void *pool, unsigned long poolsize,
                unsigned long size, unsigned long align)
{
        struct header *head = pool;
        unsigned int bucket;
        unsigned long i;
        struct bucket_state *bs;
        struct page_header *ph;
        unsigned int sp_bits;

        if (poolsize < MIN_USEFUL_SIZE) {
                return tiny_alloc_get(pool, poolsize, size, align);
        }

        size = align_up(size, align);
        if (unlikely(!size))
                size = 1;
        bucket = size_to_bucket(size);

        sp_bits = small_page_bits(poolsize);

        if (bucket >= max_bucket(sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                return huge_alloc(pool, poolsize, size, align);
        }

        bs = &head->bs[bucket];

        if (!bs->page_list) {
                struct page_header *ph;

                if (large_page_bucket(bucket, sp_bits))
                        bs->page_list = get_large_page(head, poolsize,
                                                       sp_bits);
                else
                        bs->page_list = get_small_page(head, poolsize,
                                                       sp_bits);
                /* FIXME: Try large-aligned alloc?  Header stuffing? */
                if (unlikely(!bs->page_list))
                        return NULL;
                ph = from_pgnum(head, bs->page_list, sp_bits);
                ph->bucket = bucket;
                ph->elements_used = 0;
                ph->next = 0;
                memset(ph->used, 0, used_size(bs->elements_per_page));
        }

        ph = from_pgnum(head, bs->page_list, sp_bits);

        i = find_free_bit(ph->used);
        set_bit(ph->used, i);
        ph->elements_used++;

        /* check if this page is now full */
        if (unlikely(ph->elements_used == bs->elements_per_page)) {
                del_from_bucket_list(head, bs, ph, sp_bits);
                add_to_bucket_full_list(head, bs, ph, sp_bits);
        }

        return (char *)ph + page_header_size(ph->bucket / INTER_BUCKET_SPACE,
                                             bs->elements_per_page)
               + i * bucket_to_size(bucket);
}
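
/*
 * Worked example of the returned address, with the 64-bit figures used
 * earlier: for the 12-byte bucket in a small page (16-byte header),
 * element i == 3 lands at (char *)ph + 16 + 3*12 == page start + 52.
 */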

void alloc_free(void *pool, unsigned long poolsize, void *free)
{
        struct header *head = pool;
        struct bucket_state *bs;
        unsigned int sp_bits;
        unsigned long i, pgnum, pgoffset, offset = (char *)free - (char *)pool;
        bool smallpage;
        struct page_header *ph;

        if (poolsize < MIN_USEFUL_SIZE) {
                return tiny_alloc_free(pool, poolsize, free);
        }

        /* Get page header. */
        sp_bits = small_page_bits(poolsize);
        pgnum = offset >> sp_bits;

        /* Big page? Round down further. */
        if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                smallpage = false;
                pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);
        } else
                smallpage = true;

        /* Step back to page header. */
        ph = from_pgnum(head, pgnum, sp_bits);
        if ((void *)ph == free) {
                huge_free(head, poolsize, free);
                return;
        }

        bs = &head->bs[ph->bucket];
        pgoffset = offset - (pgnum << sp_bits)
                - page_header_size(ph->bucket / INTER_BUCKET_SPACE,
                                   bs->elements_per_page);

        if (unlikely(ph->elements_used == bs->elements_per_page)) {
                del_from_bucket_full_list(head, bs, ph, sp_bits);
                add_to_bucket_list(head, bs, ph, sp_bits);
        }

        /* Which element are we? */
        i = pgoffset / bucket_to_size(ph->bucket);
        clear_bit(ph->used, i);
        ph->elements_used--;

        if (unlikely(ph->elements_used == 0)) {
                bs = &head->bs[ph->bucket];
                del_from_bucket_list(head, bs, ph, sp_bits);
                if (smallpage)
                        add_small_page_to_freelist(head, ph, sp_bits);
                else
                        add_large_page_to_freelist(head, ph, sp_bits);
        }
}

unsigned long alloc_size(void *pool, unsigned long poolsize, void *p)
{
        struct header *head = pool;
        unsigned int pgnum, sp_bits;
        unsigned long offset = (char *)p - (char *)pool;
        struct page_header *ph;

        if (poolsize < MIN_USEFUL_SIZE)
                return tiny_alloc_size(pool, poolsize, p);

        /* Get page header. */
        sp_bits = small_page_bits(poolsize);
        pgnum = offset >> sp_bits;

        /* Big page? Round down further. */
        if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);

        /* Step back to page header. */
        ph = from_pgnum(head, pgnum, sp_bits);
        if ((void *)ph == p)
                return huge_size(head, p);

        return bucket_to_size(ph->bucket);
}

/* Useful for gdb breakpoints. */
static bool check_fail(void)
{
        return false;
}

static unsigned long count_bits(const unsigned long bitmap[],
                                unsigned long limit)
{
        unsigned long i, count = 0;

        while (limit >= BITS_PER_LONG) {
                count += popcount(bitmap[0]);
                bitmap++;
                limit -= BITS_PER_LONG;
        }

        for (i = 0; i < limit; i++)
                if (test_bit(bitmap, i))
                        count++;
        return count;
}

static bool out_of_bounds(unsigned long pgnum,
                          unsigned int sp_bits,
                          unsigned long pagesize,
                          unsigned long poolsize)
{
        if (((pgnum << sp_bits) >> sp_bits) != pgnum)
                return true;

        if ((pgnum << sp_bits) > poolsize)
                return true;

        return ((pgnum << sp_bits) + pagesize > poolsize);
}

static bool check_bucket(struct header *head,
                         unsigned long poolsize,
                         unsigned long pages[],
                         struct bucket_state *bs,
                         unsigned int bindex)
{
        bool lp_bucket;
        struct page_header *ph;
        unsigned long taken, i, prev, pagesize, sp_bits, lp_bits;

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        lp_bucket = large_page_bucket(bindex, sp_bits);

        pagesize = 1UL << (lp_bucket ? lp_bits : sp_bits);

        /* This many elements fit? */
        taken = page_header_size(bindex / INTER_BUCKET_SPACE,
                                 bs->elements_per_page);
        taken += bucket_to_size(bindex) * bs->elements_per_page;
        if (taken > pagesize)
                return check_fail();

        /* One more wouldn't fit? */
        taken = page_header_size(bindex / INTER_BUCKET_SPACE,
                                 bs->elements_per_page + 1);
        taken += bucket_to_size(bindex) * (bs->elements_per_page + 1);
        if (taken <= pagesize)
                return check_fail();

        /* Walk used list. */
        prev = 0;
        for (i = bs->page_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, pagesize, poolsize))
                        return check_fail();
                /* Wrong size page? */
                if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
                    != lp_bucket)
                        return check_fail();
                /* Large page not on boundary? */
                if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                /* Empty or full? */
                if (ph->elements_used == 0)
                        return check_fail();
                if (ph->elements_used >= bs->elements_per_page)
                        return check_fail();
                /* Used bits don't agree? */
                if (ph->elements_used != count_bits(ph->used,
                                                    bs->elements_per_page))
                        return check_fail();
                /* Wrong bucket? */
                if (ph->bucket != bindex)
                        return check_fail();
                prev = i;
        }

        /* Walk full list. */
        prev = 0;
        for (i = bs->full_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, pagesize, poolsize))
                        return check_fail();
                /* Wrong size page? */
                if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
                    != lp_bucket)
                        return check_fail();
                /* Large page not on boundary? */
                if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                /* Not full? */
                if (ph->elements_used != bs->elements_per_page)
                        return check_fail();
                /* Used bits don't agree? */
                if (ph->elements_used != count_bits(ph->used,
                                                    bs->elements_per_page))
                        return check_fail();
                /* Wrong bucket? */
                if (ph->bucket != bindex)
                        return check_fail();
                prev = i;
        }
        return true;
}

bool alloc_check(void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        unsigned long prev, i, lp_bits, sp_bits, header_size, num_buckets;
        struct page_header *ph;
        struct huge_alloc *ha;
        unsigned long pages[MAX_SMALL_PAGES / BITS_PER_LONG] = { 0 };

        if (poolsize < MIN_USEFUL_SIZE)
                return tiny_alloc_check(pool, poolsize);

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);

        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        /* First, set all bits taken by header. */
        for (i = 0; i < header_size; i += (1UL << sp_bits))
                set_bit(pages, i >> sp_bits);

        /* Check small page free list. */
        prev = 0;
        for (i = head->small_free_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, 1UL << sp_bits, poolsize))
                        return check_fail();
                /* Large page? */
                if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                prev = i;
        }

        /* Check large page free list. */
        prev = 0;
        for (i = head->large_free_list; i; i = ph->next) {
                /* Bad pointer? */
                if (out_of_bounds(i, sp_bits, 1UL << lp_bits, poolsize))
                        return check_fail();
                /* Not large page? */
                if (!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                        return check_fail();
                /* Not page boundary? */
                if ((i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                prev = i;
        }

        /* Check the buckets. */
        for (i = 0; i < max_bucket(lp_bits); i++) {
                struct bucket_state *bs = &head->bs[i];

                if (!check_bucket(head, poolsize, pages, bs, i))
                        return false;
        }

        /* Check the huge alloc list. */
        prev = 0;
        for (i = head->huge; i; i = ha->next) {
                unsigned long pgbits, j;

                /* Bad pointer? */
                if (i >= poolsize || i + sizeof(*ha) > poolsize)
                        return check_fail();
                ha = (void *)((char *)head + i);

                /* Check contents of ha. */
                if (ha->off > poolsize || ha->off + ha->len > poolsize)
                        return check_fail();

                /* Large or small page? */
                pgbits = test_bit(head->pagesize, ha->off >> lp_bits)
                        ? lp_bits : sp_bits;

                /* Not page boundary? */
                if ((ha->off % (1UL << pgbits)) != 0)
                        return check_fail();

                /* Not page length? */
                if ((ha->len % (1UL << pgbits)) != 0)
                        return check_fail();

                /* Linked list corrupt? */
                if (ha->prev != prev)
                        return check_fail();

                for (j = ha->off; j < ha->off + ha->len; j += (1UL<<sp_bits)) {
                        /* Already seen this page? */
                        if (test_bit(pages, j >> sp_bits))
                                return check_fail();
                        set_bit(pages, j >> sp_bits);
                }

                prev = i;
        }

        /* Make sure every page is accounted for. */
        for (i = 0; i < poolsize >> sp_bits; i++) {
                if (!test_bit(pages, i))
                        return check_fail();
                if (test_bit(head->pagesize,
                             i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                        /* Large page, skip rest. */
                        i += SMALL_PAGES_PER_LARGE_PAGE - 1;
                }
        }

        return true;
}

static unsigned long print_overhead(FILE *out, const char *desc,
                                    unsigned long bytes,
                                    unsigned long poolsize)
{
        fprintf(out, "Overhead (%s): %lu bytes (%.3g%%)\n",
                desc, bytes, 100.0 * bytes / poolsize);
        return bytes;
}

static unsigned long count_list(struct header *head,
                                u16 pgnum,
                                unsigned int sp_bits,
                                unsigned long *total_elems)
{
        struct page_header *p;
        unsigned long ret = 0;

        while (pgnum) {
                p = from_pgnum(head, pgnum, sp_bits);
                if (total_elems)
                        (*total_elems) += p->elements_used;
                ret++;
                pgnum = p->next;
        }
        return ret;
}

static unsigned long visualize_bucket(FILE *out, struct header *head,
                                      unsigned int bucket,
                                      unsigned long poolsize,
                                      unsigned int sp_bits)
{
        unsigned long num_full, num_partial, num_pages, page_size,
                elems, hdr_min, hdr_size, elems_per_page, overhead = 0;

        elems_per_page = head->bs[bucket].elements_per_page;

        /* If we used byte-based bitmaps, we could shrink the page header to: */
        hdr_min = sizeof(struct page_header)
                - sizeof(((struct page_header *)0)->used)
                + align_up(elems_per_page, CHAR_BIT) / CHAR_BIT;
        hdr_size = page_header_size(bucket / INTER_BUCKET_SPACE,
                                    elems_per_page);

        elems = 0;
        num_full = count_list(head, head->bs[bucket].full_list, sp_bits,
                              &elems);
        num_partial = count_list(head, head->bs[bucket].page_list, sp_bits,
                                 &elems);
        num_pages = num_full + num_partial;
        if (!num_pages)
                return 0;

        fprintf(out, "Bucket %u (%lu bytes):"
                " %lu full, %lu partial = %lu elements\n",
                bucket, bucket_to_size(bucket), num_full, num_partial, elems);
        /* Strict requirement of page header size. */
        overhead += print_overhead(out, "page headers",
                                   hdr_min * num_pages, poolsize);
        /* Gap between minimal page header and actual start. */
        overhead += print_overhead(out, "page post-header alignments",
                                   (hdr_size - hdr_min) * num_pages, poolsize);
        /* Between last element and end of page. */
        page_size = (1UL << sp_bits);
        if (large_page_bucket(bucket, sp_bits))
                page_size <<= BITS_FROM_SMALL_TO_LARGE_PAGE;

        overhead += print_overhead(out, "page tails",
                                   (page_size - (hdr_size
                                                 + (elems_per_page
                                                    * bucket_to_size(bucket))))
                                   * num_pages, poolsize);
        return overhead;
}

void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        unsigned long i, lp_bits, sp_bits, header_size, num_buckets, count,
                overhead = 0;

        fprintf(out, "Pool %p size %lu: (%s allocator)\n", pool, poolsize,
                poolsize < MIN_USEFUL_SIZE ? "tiny" : "standard");

        if (poolsize < MIN_USEFUL_SIZE) {
                tiny_alloc_visualize(out, pool, poolsize);
                return;
        }

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);
        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        fprintf(out, "Large page size %lu, small page size %lu.\n",
                1UL << lp_bits, 1UL << sp_bits);
        overhead += print_overhead(out, "unused pool tail",
                                   poolsize % (1UL << lp_bits), poolsize);
        fprintf(out, "Main header %lu bytes (%lu small pages).\n",
                header_size, align_up(header_size, 1UL << sp_bits) >> sp_bits);
        overhead += print_overhead(out, "partial header page",
                                   align_up(header_size, 1UL << sp_bits)
                                   - header_size, poolsize);
        /* Total large pages. */
        i = count_bits(head->pagesize, poolsize >> lp_bits);
        /* Used pages. */
        count = i - count_list(head, head->large_free_list, sp_bits, NULL);
        fprintf(out, "%lu/%lu large pages used (%.3g%%)\n",
                count, i, count ? 100.0 * count / i : 0.0);

        /* Total small pages. */
        i = ((poolsize >> lp_bits) - i) << BITS_FROM_SMALL_TO_LARGE_PAGE;
        /* Used pages */
        count = i - count_list(head, head->small_free_list, sp_bits, NULL);
        fprintf(out, "%lu/%lu small pages used (%.3g%%)\n",
                count, i, count ? 100.0 * count / i : 0.0);

        /* Summary of each bucket. */
        fprintf(out, "%lu buckets:\n", num_buckets);
        for (i = 0; i < num_buckets; i++)
                overhead += visualize_bucket(out, head, i, poolsize, sp_bits);

        print_overhead(out, "total", overhead, poolsize);
}