#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>
#include <ccan/short_types/short_types.h>
/*
   Inspired by (and parts taken from) Andrew Tridgell's alloc_mmap:
   http://samba.org/~tridge/junkcode/alloc_mmap/

   Copyright (C) Andrew Tridgell 2007

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#if 0 /* Until we have the tiny allocator working, go down to 64k */

/* We divide the pool into this many large pages (nearest power of 2) */
#define MAX_LARGE_PAGES (1024UL)

/* 32 small pages == 1 large page. */
#define BITS_FROM_SMALL_TO_LARGE_PAGE 5

#else

#define MAX_LARGE_PAGES (128UL)
#define BITS_FROM_SMALL_TO_LARGE_PAGE 4

#endif

#define MAX_SMALL_PAGES (MAX_LARGE_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)

/* Smallest pool size for this scheme: 128-byte small pages.  That's
 * 9/13% overhead for 32/64-bit. */
#define MIN_USEFUL_SIZE (MAX_SMALL_PAGES * 128)
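/*
 * For concreteness, with the #else values above:
 *   MAX_SMALL_PAGES = 128 << 4 = 2048
 *   MIN_USEFUL_SIZE = 2048 * 128 = 262144 bytes (256k),
 * so smaller pools fall through to the tiny allocator.  The #if 0 values
 * would give 1024 << 5 = 32768 small pages and a 4MB minimum instead.
 */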
/* Every 4 buckets, we jump up a power of 2. ...8 10 12 14 16 20 24 28 32... */
#define INTER_BUCKET_SPACE 4

#define SMALL_PAGES_PER_LARGE_PAGE (1 << BITS_FROM_SMALL_TO_LARGE_PAGE)

/* FIXME: Figure this out properly. */
#define MAX_SIZE (1 << 30)

/* How few objects should fit in a page before we use a larger one? (8) */
#define MAX_PAGE_OBJECT_ORDER 3
#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

struct bucket_state {
        u32 elements_per_page;
        u16 page_list;
        u16 full_list;
};

struct header {
        /* Bitmap of which pages are large. */
        unsigned long pagesize[MAX_LARGE_PAGES / BITS_PER_LONG];

        /* List of unused small/large pages. */
        u16 small_free_list;
        u16 large_free_list;

        /* This is less defined: we have INTER_BUCKET_SPACE buckets for
         * each power of 2. */
        struct bucket_state bs[1];
};

struct page_header {
        u16 next, prev;
        /* FIXME: We can just count all-0 and all-1 used[] elements. */
        unsigned elements_used : 25;
        unsigned bucket : 7;
        unsigned long used[1]; /* One bit per element. */
};
/* 2 bits for every byte to allocate. */
static void tiny_alloc_init(void *pool, unsigned long poolsize)

static void *tiny_alloc_get(void *pool, unsigned long poolsize,
                            unsigned long size, unsigned long align)

static void tiny_alloc_free(void *pool, unsigned long poolsize, void *free)

static unsigned long tiny_alloc_size(void *pool, unsigned long poolsize,
                                     void *p)

static bool tiny_alloc_check(void *pool, unsigned long poolsize)
static unsigned int fls(unsigned long val)
{
#if HAVE_BUILTIN_CLZL
        /* This is significantly faster! */
        return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
#else
        if (!(val & 0xffff0000u)) {
        if (!(val & 0xff000000u)) {
        if (!(val & 0xf0000000u)) {
        if (!(val & 0xc0000000u)) {
        if (!(val & 0x80000000u)) {
/* FIXME: Move to bitops. */
static unsigned int ffsl(unsigned long val)
{
#if HAVE_BUILTIN_FFSL
        /* This is significantly faster! */
        return __builtin_ffsl(val);
#else
        if (sizeof(long) == sizeof(u64)) {
                if (!(val & 0xffffffff)) {
                        /* Workaround gcc warning on 32-bit:
                           error: right shift count >= width of type */
        if (!(val & 0xffff)) {
static unsigned int popcount(unsigned long val)
{
#if HAVE_BUILTIN_POPCOUNTL
        return __builtin_popcountl(val);
#else
        if (sizeof(long) == sizeof(u64)) {
                u64 v = val;
                v = (v & 0x5555555555555555ULL)
                        + ((v >> 1) & 0x5555555555555555ULL);
                v = (v & 0x3333333333333333ULL)
                        + ((v >> 2) & 0x3333333333333333ULL);
                v = (v & 0x0F0F0F0F0F0F0F0FULL)
                        + ((v >> 4) & 0x0F0F0F0F0F0F0F0FULL);
                v = (v & 0x00FF00FF00FF00FFULL)
                        + ((v >> 8) & 0x00FF00FF00FF00FFULL);
                v = (v & 0x0000FFFF0000FFFFULL)
                        + ((v >> 16) & 0x0000FFFF0000FFFFULL);
                v = (v & 0x00000000FFFFFFFFULL)
                        + ((v >> 32) & 0x00000000FFFFFFFFULL);
                return v;
        }
        val = (val & 0x55555555ULL) + ((val >> 1) & 0x55555555ULL);
        val = (val & 0x33333333ULL) + ((val >> 2) & 0x33333333ULL);
        val = (val & 0x0F0F0F0FULL) + ((val >> 4) & 0x0F0F0F0FULL);
        val = (val & 0x00FF00FFULL) + ((val >> 8) & 0x00FF00FFULL);
        val = (val & 0x0000FFFFULL) + ((val >> 16) & 0x0000FFFFULL);
        return val;
#endif
}
/*
 * Every 4 buckets, the size doubles.
 * Between buckets, sizes increase linearly.
 *
 * eg. bucket 40 = 2^10              = 1024
 *     bucket 41 = 2^10 + 1 * 2^10/4 = 1024 + 256
 *     bucket 42 = 2^10 + 2 * 2^10/4 = 1024 + 512
 *     bucket 43 = 2^10 + 3 * 2^10/4 = 1024 + 768
 *     bucket 44 = 2^11              = 2048
 *
 * Care is taken to handle low numbered buckets, at the cost of overflow.
 */
static unsigned long bucket_to_size(unsigned int bucket)
{
        unsigned long base = 1 << (bucket / INTER_BUCKET_SPACE);
        return base + ((bucket % INTER_BUCKET_SPACE)
                       << (bucket / INTER_BUCKET_SPACE))
                / INTER_BUCKET_SPACE;
}
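/*
 * For example: bucket 41 has base 1 << (41/4) = 1024 and 41 % 4 == 1,
 * so bucket_to_size(41) == 1024 + (1 << 10)/4 == 1280.
 * Bucket 13 has base 1 << 3 = 8 and 13 % 4 == 1, giving 8 + 8/4 == 10.
 */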
/*
 * Say size is 10.
 *   fls(size/2) == 3.  1 << 3 == 8, so we're 2 too large, out of a possible
 * 8 too large.  That's 1/4 of the way to the next power of 2 == 1 bucket.
 *
 * We make sure we round up.  Note that this fails on 32 bit at size
 * 1879048193 (around bucket 120).
 */
static unsigned int size_to_bucket(unsigned long size)
{
        unsigned int base = fls(size/2);
        unsigned long overshoot;

        overshoot = size - (1 << base);
        return base * INTER_BUCKET_SPACE
                + ((overshoot * INTER_BUCKET_SPACE + (1 << base)-1) >> base);
}
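/*
 * Round-trip example: size 10 gives base = fls(5) = 3 and overshoot
 * = 10 - 8 = 2, so the bucket is 3*4 + ((2*4 + 7) >> 3) = 12 + 1 = 13,
 * and bucket_to_size(13) == 10 as shown above.  A size that is already a
 * bucket size maps to exactly that bucket; anything in between rounds up.
 */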
static unsigned int small_page_bits(unsigned long poolsize)
{
        return fls(poolsize / MAX_SMALL_PAGES / 2);
}
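/*
 * For example, a 1MB pool gives fls(1048576 / 2048 / 2) = fls(256) = 9:
 * 512-byte small pages and 8k large pages (sp_bits + 4), so the pool is
 * covered by at most MAX_SMALL_PAGES small pages.
 */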
static unsigned long align_up(unsigned long x, unsigned long align)
{
        return (x + align - 1) & ~(align - 1);
}
static struct page_header *from_pgnum(struct header *head,
                                      unsigned long pgnum,
                                      unsigned sp_bits)
{
        return (struct page_header *)((char *)head + (pgnum << sp_bits));
}

static u16 to_pgnum(struct header *head, void *p, unsigned sp_bits)
{
        return ((char *)p - (char *)head) >> sp_bits;
}
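/*
 * Illustration: with 512-byte small pages (sp_bits == 9), an object at
 * pool offset 0x1300 is in small page 0x1300 >> 9 = 9, and
 * from_pgnum(head, 9, 9) points back at its page header at offset 0x1200.
 */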
static size_t used_size(unsigned int num_elements)
{
        return align_up(num_elements, BITS_PER_LONG) / CHAR_BIT;
}
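/*
 * Note that the bitmap is rounded up to whole longs, so on a 64-bit
 * machine used_size(1) == used_size(64) == 8 bytes and
 * used_size(65) == 16 bytes.
 */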
/*
 * We always align the first entry to the lower power of 2.
 * eg. the 12-byte bucket gets 8-byte aligned.  The 4096-byte bucket
 * gets 4096-byte aligned.
 */
static unsigned long page_header_size(unsigned int align_bits,
                                      unsigned long num_elements)
{
        unsigned long size;

        size = sizeof(struct page_header)
                - sizeof(((struct page_header *)0)->used)
                + used_size(num_elements);
        return align_up(size, 1 << align_bits);
}
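/*
 * For example, the 128-byte bucket aligns its first entry to
 * 1 << (bucket/INTER_BUCKET_SPACE) = 128 bytes, so any header between
 * 1 and 128 bytes occupies a full 128 bytes of the page and the first
 * element always starts on a 128-byte boundary.
 */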
static void add_to_list(struct header *head,
                        u16 *list, struct page_header *ph, unsigned sp_bits)
{
        unsigned long h = *list, offset = to_pgnum(head, ph, sp_bits);

        ph->next = h;
        if (h) {
                struct page_header *prev = from_pgnum(head, h, sp_bits);
                assert(prev->prev == 0);
                prev->prev = offset;
        }
        *list = offset;
        ph->prev = 0;
}

static void del_from_list(struct header *head,
                          u16 *list, struct page_header *ph, unsigned sp_bits)
{
        if (ph->prev == 0) {
                *list = ph->next;
        } else {
                struct page_header *prev = from_pgnum(head, ph->prev, sp_bits);
                prev->next = ph->next;
        }
        if (ph->next) {
                struct page_header *next = from_pgnum(head, ph->next, sp_bits);
                next->prev = ph->prev;
        }
}

static u16 pop_from_list(struct header *head,
                         u16 *list,
                         unsigned int sp_bits)
{
        u16 h = *list;
        struct page_header *ph = from_pgnum(head, h, sp_bits);

        if (likely(h)) {
                *list = ph->next;
                if (*list)
                        from_pgnum(head, *list, sp_bits)->prev = 0;
        }
        return h;
}
static void add_small_page_to_freelist(struct header *head,
                                       struct page_header *ph,
                                       unsigned int sp_bits)
{
        add_to_list(head, &head->small_free_list, ph, sp_bits);
}

static void add_large_page_to_freelist(struct header *head,
                                       struct page_header *ph,
                                       unsigned int sp_bits)
{
        add_to_list(head, &head->large_free_list, ph, sp_bits);
}

static void add_to_bucket_list(struct header *head,
                               struct bucket_state *bs,
                               struct page_header *ph,
                               unsigned int sp_bits)
{
        add_to_list(head, &bs->page_list, ph, sp_bits);
}

static void del_from_bucket_list(struct header *head,
                                 struct bucket_state *bs,
                                 struct page_header *ph,
                                 unsigned int sp_bits)
{
        del_from_list(head, &bs->page_list, ph, sp_bits);
}

static void del_from_bucket_full_list(struct header *head,
                                      struct bucket_state *bs,
                                      struct page_header *ph,
                                      unsigned int sp_bits)
{
        del_from_list(head, &bs->full_list, ph, sp_bits);
}

static void add_to_bucket_full_list(struct header *head,
                                    struct bucket_state *bs,
                                    struct page_header *ph,
                                    unsigned int sp_bits)
{
        add_to_list(head, &bs->full_list, ph, sp_bits);
}
static void clear_bit(unsigned long bitmap[], unsigned int off)
{
        bitmap[off / BITS_PER_LONG] &= ~(1UL << (off % BITS_PER_LONG));
}

static bool test_bit(const unsigned long bitmap[], unsigned int off)
{
        return bitmap[off / BITS_PER_LONG] & (1UL << (off % BITS_PER_LONG));
}

static void set_bit(unsigned long bitmap[], unsigned int off)
{
        bitmap[off / BITS_PER_LONG] |= (1UL << (off % BITS_PER_LONG));
}
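/*
 * Illustration: on a 64-bit machine, bit 70 lives in bitmap[1] as
 * 1UL << 6.  Using 1UL rather than a plain int 1 matters once
 * off % BITS_PER_LONG reaches 32 or more.
 */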
/* There must be a bit to be found. */
static unsigned int find_free_bit(const unsigned long bitmap[])
{
        unsigned int i;

        for (i = 0; bitmap[i] == -1UL; i++);
        return (i*BITS_PER_LONG) + ffsl(~bitmap[i]) - 1;
}
/* How many elements can we fit in a page? */
static unsigned long elements_per_page(unsigned long align_bits,
                                       unsigned long esize,
                                       unsigned long psize)
{
        unsigned long num, overhead;

        /* First approximation: no extra room for bitmap. */
        overhead = align_up(sizeof(struct page_header), 1 << align_bits);
        num = (psize - overhead) / esize;

        while (page_header_size(align_bits, num) + esize * num > psize)
                num--;

        return num;
}
static bool large_page_bucket(unsigned int bucket, unsigned int sp_bits)
{
        unsigned long max_smallsize;

        /* Note: this doesn't take into account the page header. */
        max_smallsize = (1UL << sp_bits) >> MAX_PAGE_OBJECT_ORDER;

        return bucket_to_size(bucket) > max_smallsize;
}
static unsigned int max_bucket(unsigned int lp_bits)
{
        return (lp_bits - MAX_PAGE_OBJECT_ORDER) * INTER_BUCKET_SPACE;
}
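/*
 * Worked example: with 512-byte small pages (sp_bits == 9, lp_bits == 13),
 * objects larger than 512 >> 3 == 64 bytes go in large-page buckets, and
 * max_bucket(13) == (13 - 3) * 4 == 40, so buckets 0 through 39 are used.
 * The largest, bucket 39, holds 896-byte objects; an 8k large page fits at
 * least 8 of them, as MAX_PAGE_OBJECT_ORDER requires.
 */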
void alloc_init(void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        struct page_header *ph;
        unsigned int lp_bits, sp_bits, num_buckets;
        unsigned long header_size, i;

        if (poolsize < MIN_USEFUL_SIZE) {
                tiny_alloc_init(pool, poolsize);
                return;
        }

        /* We rely on page numbers fitting in 16 bits. */
        BUILD_ASSERT(MAX_SMALL_PAGES < 65536);

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);

        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        memset(head, 0, header_size);
        for (i = 0; i < num_buckets; i++) {
                unsigned long pagesize;

                if (large_page_bucket(i, sp_bits))
                        pagesize = 1UL << lp_bits;
                else
                        pagesize = 1UL << sp_bits;

                head->bs[i].elements_per_page
                        = elements_per_page(i / INTER_BUCKET_SPACE,
                                            bucket_to_size(i),
                                            pagesize);
        }

        /* They start as all large pages. */
        memset(head->pagesize, 0xFF, sizeof(head->pagesize));
        /* FIXME: small pages for last bit? */

        /* Split first page into small pages. */
        assert(header_size < (1UL << lp_bits));
        clear_bit(head->pagesize, 0);

        /* Skip over page(s) used by header, add rest to free list */
        for (i = align_up(header_size, (1 << sp_bits)) >> sp_bits;
             i < SMALL_PAGES_PER_LARGE_PAGE;
             i++) {
                ph = from_pgnum(head, i, sp_bits);
                ph->elements_used = 0;
                add_small_page_to_freelist(head, ph, sp_bits);
        }

        /* Add the rest of the pages as large pages. */
        i = SMALL_PAGES_PER_LARGE_PAGE;
        while ((i << sp_bits) + (1 << lp_bits) <= poolsize) {
                ph = from_pgnum(head, i, sp_bits);
                ph->elements_used = 0;
                add_large_page_to_freelist(head, ph, sp_bits);
                i += SMALL_PAGES_PER_LARGE_PAGE;
        }
}
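/*
 * Resulting pool layout, summarizing the code above: the first large page
 * is split into small pages; the first few of those hold the header and
 * the rest go on the small-page free list.  Every following large-page
 * sized region goes on the large-page free list, and any remainder at the
 * end of the pool that cannot hold a whole large page is simply unused
 * (alloc_visualize() reports it as "unused pool tail").
 */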
/* A large page's worth of small pages is free: delete them from the free list. */
static void del_large_from_small_free_list(struct header *head,
                                           struct page_header *ph,
                                           unsigned int sp_bits)
{
        unsigned long i;

        for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                del_from_list(head, &head->small_free_list,
                              (void *)ph + (i << sp_bits),
                              sp_bits);
        }
}
static bool all_empty(struct header *head,
                      unsigned long pgnum,
                      unsigned int sp_bits)
{
        unsigned long i;

        for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                struct page_header *ph = from_pgnum(head, pgnum + i, sp_bits);
                if (ph->elements_used)
                        return false;
        }
        return true;
}
static u16 get_large_page(struct header *head, unsigned long poolsize,
                          unsigned int sp_bits)
{
        unsigned int lp_bits, i, page;

        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        page = pop_from_list(head, &head->large_free_list, sp_bits);
        if (likely(page))
                return page;

        /* Look for small pages to coalesce, after first large page. */
        for (i = SMALL_PAGES_PER_LARGE_PAGE;
             i < (poolsize >> lp_bits) << BITS_FROM_SMALL_TO_LARGE_PAGE;
             i += SMALL_PAGES_PER_LARGE_PAGE) {
                /* Already a large page? */
                if (test_bit(head->pagesize, i / SMALL_PAGES_PER_LARGE_PAGE))
                        continue;
                if (all_empty(head, i, sp_bits)) {
                        struct page_header *ph = from_pgnum(head, i, sp_bits);
                        set_bit(head->pagesize,
                                i / SMALL_PAGES_PER_LARGE_PAGE);
                        del_large_from_small_free_list(head, ph, sp_bits);
                        add_large_page_to_freelist(head, ph, sp_bits);
                }
        }

        return pop_from_list(head, &head->large_free_list, sp_bits);
}
/* Returns small page. */
static unsigned long break_up_large_page(struct header *head,
                                         unsigned int sp_bits,
                                         u16 lpage)
{
        unsigned int i;

        clear_bit(head->pagesize, lpage >> BITS_FROM_SMALL_TO_LARGE_PAGE);

        for (i = 1; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                struct page_header *ph = from_pgnum(head, lpage + i, sp_bits);
                add_small_page_to_freelist(head, ph, sp_bits);
        }

        return lpage;
}
static u16 get_small_page(struct header *head, unsigned long poolsize,
                          unsigned int sp_bits)
{
        u16 ret;

        ret = pop_from_list(head, &head->small_free_list, sp_bits);
        if (likely(ret))
                return ret;
        ret = get_large_page(head, poolsize, sp_bits);
        if (likely(ret))
                ret = break_up_large_page(head, sp_bits, ret);
        return ret;
}
void *alloc_get(void *pool, unsigned long poolsize,
                unsigned long size, unsigned long align)
{
        struct header *head = pool;
        unsigned long bucket;
        unsigned long i;
        struct bucket_state *bs;
        struct page_header *ph;
        unsigned int sp_bits;

        if (poolsize < MIN_USEFUL_SIZE) {
                return tiny_alloc_get(pool, poolsize, size, align);
        }

        size = align_up(size, align);

        bucket = size_to_bucket(size);

        sp_bits = small_page_bits(poolsize);

        if (bucket >= max_bucket(sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                /* FIXME: huge alloc. */
                return NULL;
        }

        bs = &head->bs[bucket];

        if (!bs->page_list) {
                struct page_header *ph;

                if (large_page_bucket(bucket, sp_bits))
                        bs->page_list = get_large_page(head, poolsize,
                                                       sp_bits);
                else
                        bs->page_list = get_small_page(head, poolsize,
                                                       sp_bits);
                /* FIXME: Try large-aligned alloc? Header stuffing? */
                if (unlikely(!bs->page_list))
                        return NULL;
                ph = from_pgnum(head, bs->page_list, sp_bits);
                ph->bucket = bucket;
                ph->elements_used = 0;
                ph->next = 0;
                memset(ph->used, 0, used_size(bs->elements_per_page));
        }

        ph = from_pgnum(head, bs->page_list, sp_bits);

        i = find_free_bit(ph->used);
        set_bit(ph->used, i);
        ph->elements_used++;

        /* check if this page is now full */
        if (unlikely(ph->elements_used == bs->elements_per_page)) {
                del_from_bucket_list(head, bs, ph, sp_bits);
                add_to_bucket_full_list(head, bs, ph, sp_bits);
        }

        return (char *)ph + page_header_size(ph->bucket / INTER_BUCKET_SPACE,
                                             bs->elements_per_page)
               + i * bucket_to_size(bucket);
}
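/*
 * Example of the returned address: a 10-byte allocation with align 1 maps
 * to bucket 13 (see size_to_bucket above).  The first element of its page
 * is aligned to 1 << (13/4) = 8 bytes, and the element whose free bit i
 * was found lives at the page start
 * + page_header_size(3, elements_per_page) + i * 10.
 */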
void alloc_free(void *pool, unsigned long poolsize, void *free)
{
        struct header *head = pool;
        struct bucket_state *bs;
        unsigned int sp_bits;
        unsigned long i, pgnum, pgoffset, offset = (char *)free - (char *)pool;
        struct page_header *ph;

        if (poolsize < MIN_USEFUL_SIZE) {
                return tiny_alloc_free(pool, poolsize, free);
        }

        /* Get page header. */
        sp_bits = small_page_bits(poolsize);
        pgnum = offset >> sp_bits;

        /* Big page? Round down further. */
        if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);
        }

        /* Step back to page header. */
        ph = from_pgnum(head, pgnum, sp_bits);
        bs = &head->bs[ph->bucket];
        pgoffset = offset - (pgnum << sp_bits)
                - page_header_size(ph->bucket / INTER_BUCKET_SPACE,
                                   bs->elements_per_page);

        if (unlikely(ph->elements_used == bs->elements_per_page)) {
                del_from_bucket_full_list(head, bs, ph, sp_bits);
                add_to_bucket_list(head, bs, ph, sp_bits);
        }

        /* Which element are we? */
        i = pgoffset / bucket_to_size(ph->bucket);
        clear_bit(ph->used, i);
        ph->elements_used--;

        if (unlikely(ph->elements_used == 0)) {
                bs = &head->bs[ph->bucket];
                del_from_bucket_list(head, bs, ph, sp_bits);
                if (!large_page_bucket(ph->bucket, sp_bits))
                        add_small_page_to_freelist(head, ph, sp_bits);
                else
                        add_large_page_to_freelist(head, ph, sp_bits);
        }
}
unsigned long alloc_size(void *pool, unsigned long poolsize, void *p)
{
        struct header *head = pool;
        unsigned int pgnum, sp_bits;
        unsigned long offset = (char *)p - (char *)pool;
        struct page_header *ph;

        if (poolsize < MIN_USEFUL_SIZE)
                return tiny_alloc_size(pool, poolsize, p);

        /* Get page header. */
        sp_bits = small_page_bits(poolsize);
        pgnum = offset >> sp_bits;

        /* Big page? Round down further. */
        if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);

        /* Step back to page header. */
        ph = from_pgnum(head, pgnum, sp_bits);
        return bucket_to_size(ph->bucket);
}
/* Useful for gdb breakpoints. */
static bool check_fail(void)
{
        return false;
}
static unsigned long count_bits(const unsigned long bitmap[],
                                unsigned long limit)
{
        unsigned long i, count = 0;

        while (limit >= BITS_PER_LONG) {
                count += popcount(bitmap[0]);
                bitmap++;
                limit -= BITS_PER_LONG;
        }

        for (i = 0; i < limit; i++)
                if (test_bit(bitmap, i))
                        count++;
        return count;
}
static bool out_of_bounds(unsigned long pgnum,
                          unsigned int sp_bits,
                          unsigned long pagesize,
                          unsigned long poolsize)
{
        if (((pgnum << sp_bits) >> sp_bits) != pgnum)
                return true;

        if ((pgnum << sp_bits) > poolsize)
                return true;

        return ((pgnum << sp_bits) + pagesize > poolsize);
}
static bool check_bucket(struct header *head,
                         unsigned long poolsize,
                         unsigned long pages[],
                         struct bucket_state *bs,
                         unsigned int bindex)
{
        bool lp_bucket;
        struct page_header *ph;
        unsigned long taken, i, prev, pagesize, sp_bits, lp_bits;

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        lp_bucket = large_page_bucket(bindex, sp_bits);

        pagesize = 1UL << (lp_bucket ? lp_bits : sp_bits);

        /* This many elements fit? */
        taken = page_header_size(bindex / INTER_BUCKET_SPACE,
                                 bs->elements_per_page);
        taken += bucket_to_size(bindex) * bs->elements_per_page;
        if (taken > pagesize)
                return check_fail();

        /* One more wouldn't fit? */
        taken = page_header_size(bindex / INTER_BUCKET_SPACE,
                                 bs->elements_per_page + 1);
        taken += bucket_to_size(bindex) * (bs->elements_per_page + 1);
        if (taken <= pagesize)
                return check_fail();
        /* Walk used list. */
        prev = 0;
        for (i = bs->page_list; i; i = ph->next) {
                if (out_of_bounds(i, sp_bits, pagesize, poolsize))
                        return check_fail();
                /* Wrong size page? */
                if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
                    != lp_bucket)
                        return check_fail();
                /* Large page not on boundary? */
                if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);

                if (ph->elements_used == 0)
                        return check_fail();
                if (ph->elements_used >= bs->elements_per_page)
                        return check_fail();
                /* Used bits don't agree? */
                if (ph->elements_used != count_bits(ph->used,
                                                    bs->elements_per_page))
                        return check_fail();

                if (ph->bucket != bindex)
                        return check_fail();
                prev = i;
        }
        /* Walk full list. */
        prev = 0;
        for (i = bs->full_list; i; i = ph->next) {
                if (out_of_bounds(i, sp_bits, pagesize, poolsize))
                        return check_fail();
                /* Wrong size page? */
                if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
                    != lp_bucket)
                        return check_fail();
                /* Large page not on boundary? */
                if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);

                if (ph->elements_used != bs->elements_per_page)
                        return check_fail();
                /* Used bits don't agree? */
                if (ph->elements_used != count_bits(ph->used,
                                                    bs->elements_per_page))
                        return check_fail();

                if (ph->bucket != bindex)
                        return check_fail();
                prev = i;
        }

        return true;
}
bool alloc_check(void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        unsigned long prev, i, lp_bits, sp_bits, header_size, num_buckets;
        struct page_header *ph;
        unsigned long pages[MAX_SMALL_PAGES / BITS_PER_LONG] = { 0 };

        if (poolsize < MIN_USEFUL_SIZE)
                return tiny_alloc_check(pool, poolsize);

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);

        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        /* First, set all bits taken by header. */
        for (i = 0; i < header_size; i += (1UL << sp_bits))
                set_bit(pages, i >> sp_bits);
        /* Check small page free list. */
        prev = 0;
        for (i = head->small_free_list; i; i = ph->next) {
                if (out_of_bounds(i, sp_bits, 1 << sp_bits, poolsize))
                        return check_fail();
                if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                prev = i;
        }
        /* Check large page free list. */
        prev = 0;
        for (i = head->large_free_list; i; i = ph->next) {
                if (out_of_bounds(i, sp_bits, 1 << lp_bits, poolsize))
                        return check_fail();
                /* Not large page? */
                if (!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
                        return check_fail();
                /* Not page boundary? */
                if ((i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
                        return check_fail();
                ph = from_pgnum(head, i, sp_bits);
                /* Linked list corrupt? */
                if (ph->prev != prev)
                        return check_fail();
                /* Already seen this page? */
                if (test_bit(pages, i))
                        return check_fail();
                set_bit(pages, i);
                prev = i;
        }
        /* Check the buckets. */
        for (i = 0; i < max_bucket(lp_bits); i++) {
                struct bucket_state *bs = &head->bs[i];

                if (!check_bucket(head, poolsize, pages, bs, i))
                        return false;
        }

        /* Make sure every page accounted for. */
        for (i = 0; i < poolsize >> sp_bits; i++) {
                if (!test_bit(pages, i))
                        return check_fail();
                if (test_bit(head->pagesize,
                             i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
                        /* Large page, skip rest. */
                        i += SMALL_PAGES_PER_LARGE_PAGE - 1;
                }
        }

        return true;
}
static unsigned long print_overhead(FILE *out, const char *desc,
                                    unsigned long bytes,
                                    unsigned long poolsize)
{
        fprintf(out, "Overhead (%s): %lu bytes (%.3g%%)\n",
                desc, bytes, 100.0 * bytes / poolsize);
        return bytes;
}
static unsigned long count_list(struct header *head,
                                u16 pgnum,
                                unsigned int sp_bits,
                                unsigned long *total_elems)
{
        struct page_header *p;
        unsigned long ret = 0;

        while (pgnum) {
                p = from_pgnum(head, pgnum, sp_bits);
                if (total_elems)
                        (*total_elems) += p->elements_used;
                ret++;
                pgnum = p->next;
        }
        return ret;
}
static unsigned long visualize_bucket(FILE *out, struct header *head,
                                      unsigned int bucket,
                                      unsigned long poolsize,
                                      unsigned int sp_bits)
{
        unsigned long num_full, num_partial, num_pages, page_size,
                elems, hdr_min, hdr_size, elems_per_page, overhead = 0;

        elems_per_page = head->bs[bucket].elements_per_page;

        /* If we used byte-based bitmaps, we could get pg hdr to: */
        hdr_min = sizeof(struct page_header)
                - sizeof(((struct page_header *)0)->used)
                + align_up(elems_per_page, CHAR_BIT) / CHAR_BIT;
        hdr_size = page_header_size(bucket / INTER_BUCKET_SPACE,
                                    elems_per_page);

        elems = 0;
        num_full = count_list(head, head->bs[bucket].full_list, sp_bits,
                              &elems);
        num_partial = count_list(head, head->bs[bucket].page_list, sp_bits,
                                 &elems);
        num_pages = num_full + num_partial;
        if (!num_pages)
                return 0;

        fprintf(out, "Bucket %u (%lu bytes):"
                " %lu full, %lu partial = %lu elements\n",
                bucket, bucket_to_size(bucket), num_full, num_partial, elems);
        /* Strict requirement of page header size. */
        overhead += print_overhead(out, "page headers",
                                   hdr_min * num_pages, poolsize);
        /* Gap between minimal page header and actual start. */
        overhead += print_overhead(out, "page post-header alignments",
                                   (hdr_size - hdr_min) * num_pages, poolsize);
        /* Between last element and end of page. */
        page_size = (1 << sp_bits);
        if (large_page_bucket(bucket, sp_bits))
                page_size <<= BITS_FROM_SMALL_TO_LARGE_PAGE;
        overhead += print_overhead(out, "page tails",
                                   (page_size - (hdr_size
                                                 + (elems_per_page
                                                    * bucket_to_size(bucket))))
                                   * num_pages, poolsize);
        return overhead;
}
static void tiny_alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
{
        struct header *head = pool;
        unsigned long i, lp_bits, sp_bits, header_size, num_buckets, count,
                overhead = 0;

        fprintf(out, "Pool %p size %lu: (%s allocator)\n", pool, poolsize,
                poolsize < MIN_USEFUL_SIZE ? "tiny" : "standard");

        if (poolsize < MIN_USEFUL_SIZE) {
                tiny_alloc_visualize(out, pool, poolsize);
                return;
        }

        sp_bits = small_page_bits(poolsize);
        lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

        num_buckets = max_bucket(lp_bits);
        header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

        fprintf(out, "Large page size %lu, small page size %lu.\n",
                1UL << lp_bits, 1UL << sp_bits);
        overhead += print_overhead(out, "unused pool tail",
                                   poolsize % (1 << lp_bits), poolsize);
        fprintf(out, "Main header %lu bytes (%lu small pages).\n",
                header_size, align_up(header_size, 1 << sp_bits) >> sp_bits);
        overhead += print_overhead(out, "partial header page",
                                   align_up(header_size, 1 << sp_bits)
                                   - header_size, poolsize);
        /* Total large pages. */
        i = count_bits(head->pagesize, poolsize >> lp_bits);
        count = i - count_list(head, head->large_free_list, sp_bits, NULL);
        fprintf(out, "%lu/%lu large pages used (%.3g%%)\n",
                count, i, count ? 100.0 * count / i : 0.0);

        /* Total small pages. */
        i = (poolsize >> lp_bits) - i;
        i *= SMALL_PAGES_PER_LARGE_PAGE;
        count = i - count_list(head, head->small_free_list, sp_bits, NULL);
        fprintf(out, "%lu/%lu small pages used (%.3g%%)\n",
                count, i, count ? 100.0 * count / i : 0.0);

        /* Summary of each bucket. */
        fprintf(out, "%lu buckets:\n", num_buckets);
        for (i = 0; i < num_buckets; i++)
                overhead += visualize_bucket(out, head, i, poolsize, sp_bits);

        print_overhead(out, "total", overhead, poolsize);
}