#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include <stdbool.h>
#include "alloc.h"
#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>
#include <ccan/short_types/short_types.h>

/*
   Inspired by (and parts taken from) Andrew Tridgell's alloc_mmap:
   http://samba.org/~tridge/junkcode/alloc_mmap/

   Copyright (C) Andrew Tridgell 2007

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#if 0 /* Until we have the tiny allocator working, go down to 1 MB */

/* We divide the pool into this many large pages (nearest power of 2) */
#define MAX_PAGES (1024UL)

/* 32 small pages == 1 large page. */
#define BITS_FROM_SMALL_TO_LARGE_PAGE 5

#else

#define MAX_PAGES (128UL)
#define BITS_FROM_SMALL_TO_LARGE_PAGE 4

#endif

/* Smallest pool size for this scheme: 512-byte small pages.  That's
 * 3%/5% overhead for 32/64 bit respectively. */
#define MIN_USEFUL_SIZE (MAX_PAGES << (9 + BITS_FROM_SMALL_TO_LARGE_PAGE))

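/* With the current values that is 128 << (9 + 4) = 1 MB: 128 large pages
 * of 8192 bytes, each splittable into 16 small pages of 512 bytes. */
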
/* Every 4 buckets, we jump up a power of 2. ...8 10 12 14 16 20 24 28 32... */
#define INTER_BUCKET_SPACE 4

#define SMALL_PAGES_PER_LARGE_PAGE (1 << BITS_FROM_SMALL_TO_LARGE_PAGE)

/* FIXME: Figure this out properly. */
#define MAX_SIZE (1 << 30)

/* How few objects can fit in a page before we use a larger one? (8) */
#define MAX_PAGE_OBJECT_ORDER	3

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

struct bucket_state {
	unsigned long elements_per_page;
	u16 page_list;
	u16 full_list;
};

struct header {
	/* Bitmap of which pages are large. */
	unsigned long pagesize[MAX_PAGES / BITS_PER_LONG];

	/* List of unused small/large pages. */
	u16 small_free_list;
	u16 large_free_list;

	/* This is less defined: we have INTER_BUCKET_SPACE (4) buckets
	 * for each power of 2. */
	struct bucket_state bs[1];
};

struct page_header {
	u16 next, prev;
	u32 elements_used;
	u32 bucket;

	/* FIXME: Pack this in somewhere... */
	unsigned long used[1]; /* One bit per element. */
};

/* 2 bits for every byte to allocate.  (The tiny allocator is not yet
 * working -- see the "#if 0" above -- so these are placeholder stubs.) */
static void tiny_alloc_init(void *pool, unsigned long poolsize)
{
}

static void *tiny_alloc_get(void *pool, unsigned long poolsize,
			    unsigned long size, unsigned long align)
{
	return NULL;
}

static void tiny_alloc_free(void *pool, unsigned long poolsize, void *free)
{
}

static unsigned long tiny_alloc_size(void *pool, unsigned long poolsize,
				     void *p)
{
	return 0;
}

static bool tiny_alloc_check(void *pool, unsigned long poolsize)
{
	return true;
}

static unsigned int fls(unsigned long val)
{
#if HAVE_BUILTIN_CLZL
	/* This is significantly faster! */
	return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
#else
	/* Note: this fallback only handles 32-bit values. */
	unsigned int r = 32;

	if (!val)
		return 0;
	if (!(val & 0xffff0000u)) { val <<= 16; r -= 16; }
	if (!(val & 0xff000000u)) { val <<= 8; r -= 8; }
	if (!(val & 0xf0000000u)) { val <<= 4; r -= 4; }
	if (!(val & 0xc0000000u)) { val <<= 2; r -= 2; }
	if (!(val & 0x80000000u)) { val <<= 1; r -= 1; }
	return r;
#endif
}

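/* fls() finds the last (most significant) set bit, 1-based:
 * e.g. fls(0) == 0, fls(1) == 1, fls(1024) == 11. */
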
/* FIXME: Move to bitops. */
static unsigned int ffsl(unsigned long val)
{
#if HAVE_BUILTIN_FFSL
	/* This is significantly faster! */
	return __builtin_ffsl(val);
#else
	unsigned int r = 1;

	if (!val)
		return 0;
	if (sizeof(long) == sizeof(u64)) {
		if (!(val & 0xffffffff)) {
			/* Workaround gcc warning on 32-bit:
			   error: right shift count >= width of type */
			val = (u64)val >> 32;
			r += 32;
		}
	}
	if (!(val & 0xffff)) { val >>= 16; r += 16; }
	if (!(val & 0xff)) { val >>= 8; r += 8; }
	if (!(val & 0xf)) { val >>= 4; r += 4; }
	if (!(val & 3)) { val >>= 2; r += 2; }
	if (!(val & 1)) { val >>= 1; r += 1; }
	return r;
#endif
}

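/* ffsl() finds the first (least significant) set bit, 1-based:
 * e.g. ffsl(0) == 0, ffsl(1) == 1, ffsl(0x1000) == 13. */
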
static unsigned int popcount(unsigned long val)
{
#if HAVE_BUILTIN_POPCOUNTL
	return __builtin_popcountl(val);
#else
	/* Classic parallel bit count: sum adjacent groups of 1, 2, 4...
	 * bits (the shift doubles at each step). */
	if (sizeof(long) == sizeof(u64)) {
		u64 v = val;
		v = (v & 0x5555555555555555ULL)
			+ ((v >> 1) & 0x5555555555555555ULL);
		v = (v & 0x3333333333333333ULL)
			+ ((v >> 2) & 0x3333333333333333ULL);
		v = (v & 0x0F0F0F0F0F0F0F0FULL)
			+ ((v >> 4) & 0x0F0F0F0F0F0F0F0FULL);
		v = (v & 0x00FF00FF00FF00FFULL)
			+ ((v >> 8) & 0x00FF00FF00FF00FFULL);
		v = (v & 0x0000FFFF0000FFFFULL)
			+ ((v >> 16) & 0x0000FFFF0000FFFFULL);
		v = (v & 0x00000000FFFFFFFFULL)
			+ ((v >> 32) & 0x00000000FFFFFFFFULL);
		return v;
	}

	val = (val & 0x55555555UL) + ((val >> 1) & 0x55555555UL);
	val = (val & 0x33333333UL) + ((val >> 2) & 0x33333333UL);
	val = (val & 0x0F0F0F0FUL) + ((val >> 4) & 0x0F0F0F0FUL);
	val = (val & 0x00FF00FFUL) + ((val >> 8) & 0x00FF00FFUL);
	val = (val & 0x0000FFFFUL) + ((val >> 16) & 0x0000FFFFUL);
	return val;
#endif
}

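/* e.g. popcount(0xF0F0) == 8. */
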
/*
 * Every 4 buckets, the size doubles.
 * Between buckets, sizes increase linearly.
 *
 * eg. bucket 40 = 2^10			= 1024
 *     bucket 41 = 2^10 + 2^10*1/4	= 1024 + 256
 *     bucket 42 = 2^10 + 2^10*2/4	= 1024 + 512
 *     bucket 43 = 2^10 + 2^10*3/4	= 1024 + 768
 *     bucket 44 = 2^11			= 2048
 *
 * Care is taken to handle low-numbered buckets, at the cost of overflow
 * for very large ones.
 */
static unsigned long bucket_to_size(unsigned int bucket)
{
	unsigned long base = 1UL << (bucket / INTER_BUCKET_SPACE);
	return base + ((bucket % INTER_BUCKET_SPACE)
		       << (bucket / INTER_BUCKET_SPACE))
		/ INTER_BUCKET_SPACE;
}

/* Say size is 10:
 *   fls(size/2) == 3.  1 << 3 == 8, so we're 2 too large, out of a possible
 *   8 too large.  That's 1/4 of the way to the next power of 2 == 1 bucket.
 *
 * We make sure we round up.  Note that this fails on 32 bit at size
 * 1879048193 (around bucket 120).
 */
static unsigned int size_to_bucket(unsigned long size)
{
	unsigned int base = fls(size/2);
	unsigned long overshoot;

	overshoot = size - (1UL << base);
	return base * INTER_BUCKET_SPACE
		+ ((overshoot * INTER_BUCKET_SPACE + (1UL << base)-1) >> base);
}

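/* Illustrative sanity check (not from the original source): for sizes of
 * 4 and up the two functions round-trip, and size_to_bucket() rounds
 * in-between sizes up to the next bucket:
 *
 *	assert(size_to_bucket(bucket_to_size(41)) == 41);
 *	assert(bucket_to_size(size_to_bucket(1100)) >= 1100);
 */
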
static unsigned int large_page_bits(unsigned long poolsize)
{
	return fls(poolsize / MAX_PAGES / 2);
}

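/* e.g. a 16 MB pool: fls(16 MB / 128 / 2) = fls(65536) = 17, giving 128 kB
 * large pages (128 of them) and 8 kB small pages. */
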
static unsigned long align_up(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);
}

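/* align must be a power of 2; e.g. align_up(13, 8) == 16. */
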
static struct page_header *from_pgnum(struct header *head,
				      unsigned long pgnum,
				      unsigned int sp_bits)
{
	return (struct page_header *)((char *)head + (pgnum << sp_bits));
}

static u16 to_pgnum(struct header *head, void *p, unsigned sp_bits)
{
	return ((char *)p - (char *)head) >> sp_bits;
}

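/* Page numbers count small-page-sized units from the pool base, so page 0
 * is the pool header itself and from_pgnum()/to_pgnum() are inverses. */
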
static size_t used_size(unsigned int num_elements)
{
	return align_up(num_elements, BITS_PER_LONG) / CHAR_BIT;
}

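/* e.g. on 64-bit, 33 elements need align_up(33, 64) / 8 = 8 bytes of
 * used[] bitmap. */
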
/* We always align the first entry to the lower power of 2.
 * eg. the 12-byte bucket gets 8-byte aligned.  The 4096-byte bucket
 * gets 4096-byte aligned. */
static unsigned long page_header_size(unsigned int align_bits,
				      unsigned long num_elements)
{
	unsigned long size;

	size = sizeof(struct page_header)
		- sizeof(((struct page_header *)0)->used)
		+ used_size(num_elements);
	return align_up(size, 1UL << align_bits);
}

static void add_to_list(struct header *head,
			u16 *list, struct page_header *ph, unsigned sp_bits)
{
	unsigned long h = *list, offset = to_pgnum(head, ph, sp_bits);

	ph->next = h;
	if (h) {
		struct page_header *prev = from_pgnum(head, h, sp_bits);
		assert(prev->prev == 0);
		prev->prev = offset;
	}
	*list = offset;
	ph->prev = 0;
}

static void del_from_list(struct header *head,
			  u16 *list, struct page_header *ph, unsigned sp_bits)
{
	/* Front of list? */
	if (ph->prev == 0)
		*list = ph->next;
	else {
		struct page_header *prev = from_pgnum(head, ph->prev, sp_bits);
		prev->next = ph->next;
	}
	if (ph->next) {
		struct page_header *next = from_pgnum(head, ph->next, sp_bits);
		next->prev = ph->prev;
	}
}

static u16 pop_from_list(struct header *head,
			 u16 *list,
			 unsigned int sp_bits)
{
	u16 h = *list;
	struct page_header *ph = from_pgnum(head, h, sp_bits);

	if (likely(h)) {
		*list = ph->next;
		if (*list)
			from_pgnum(head, *list, sp_bits)->prev = 0;
	}
	return h;
}

static void add_small_page_to_freelist(struct header *head,
				       struct page_header *ph,
				       unsigned int sp_bits)
{
	add_to_list(head, &head->small_free_list, ph, sp_bits);
}

static void add_large_page_to_freelist(struct header *head,
				       struct page_header *ph,
				       unsigned int sp_bits)
{
	add_to_list(head, &head->large_free_list, ph, sp_bits);
}

static void add_to_bucket_list(struct header *head,
			       struct bucket_state *bs,
			       struct page_header *ph,
			       unsigned int sp_bits)
{
	add_to_list(head, &bs->page_list, ph, sp_bits);
}

static void del_from_bucket_list(struct header *head,
				 struct bucket_state *bs,
				 struct page_header *ph,
				 unsigned int sp_bits)
{
	del_from_list(head, &bs->page_list, ph, sp_bits);
}

static void del_from_bucket_full_list(struct header *head,
				      struct bucket_state *bs,
				      struct page_header *ph,
				      unsigned int sp_bits)
{
	del_from_list(head, &bs->full_list, ph, sp_bits);
}

static void add_to_bucket_full_list(struct header *head,
				    struct bucket_state *bs,
				    struct page_header *ph,
				    unsigned int sp_bits)
{
	add_to_list(head, &bs->full_list, ph, sp_bits);
}

static void clear_bit(unsigned long bitmap[], unsigned int off)
{
	bitmap[off / BITS_PER_LONG] &= ~(1UL << (off % BITS_PER_LONG));
}

static bool test_bit(const unsigned long bitmap[], unsigned int off)
{
	return bitmap[off / BITS_PER_LONG] & (1UL << (off % BITS_PER_LONG));
}

static void set_bit(unsigned long bitmap[], unsigned int off)
{
	bitmap[off / BITS_PER_LONG] |= (1UL << (off % BITS_PER_LONG));
}

/* There must be a bit to be found. */
static unsigned int find_free_bit(const unsigned long bitmap[])
{
	unsigned int i;

	for (i = 0; bitmap[i] == -1UL; i++);
	return (i*BITS_PER_LONG) + ffsl(~bitmap[i]) - 1;
}

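/* e.g. with bitmap[] = { ~0UL, 0x5UL } on 64-bit, the first zero bit is
 * bit 1 of the second word: 64 + ffsl(~0x5) - 1 == 65. */
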
/* How many elements can we fit in a page? */
static unsigned long elements_per_page(unsigned long align_bits,
				       unsigned long esize,
				       unsigned long psize)
{
	unsigned long num, overhead;

	/* First approximation: no extra room for bitmap. */
	overhead = align_up(sizeof(struct page_header), 1UL << align_bits);
	num = (psize - overhead) / esize;

	while (page_header_size(align_bits, num) + esize * num > psize)
		num--;

	return num;
}

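/* The first guess ignores the extra used[] bitmap space; the loop then
 * shrinks num until header, bitmap and elements all fit in the page. */
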
static bool large_page_bucket(unsigned int bucket, unsigned int sp_bits)
{
	unsigned long max_smallsize;

	/* Note: this doesn't take into account page header. */
	max_smallsize = (1UL << sp_bits) >> MAX_PAGE_OBJECT_ORDER;

	return bucket_to_size(bucket) > max_smallsize;
}

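/* e.g. with 8 kB small pages, buckets over 8192 >> 3 = 1024 bytes are
 * served from large pages. */
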
static unsigned int max_bucket(unsigned int lp_bits)
{
	return (lp_bits - MAX_PAGE_OBJECT_ORDER) * INTER_BUCKET_SPACE;
}

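/* e.g. lp_bits == 17 (128 kB large pages) gives (17 - 3) * 4 == 56
 * buckets; the largest is bucket 55 at 14336 bytes, so at least
 * 2^MAX_PAGE_OBJECT_ORDER == 8 objects fit in a large page. */
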
void alloc_init(void *pool, unsigned long poolsize)
{
	struct header *head = pool;
	struct page_header *ph;
	unsigned int lp_bits, sp_bits, num_buckets;
	unsigned long header_size, i;

	if (poolsize < MIN_USEFUL_SIZE) {
		tiny_alloc_init(pool, poolsize);
		return;
	}

	/* We rely on page numbers fitting in 16 bits. */
	BUILD_ASSERT((MAX_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE) < 65536);

	lp_bits = large_page_bits(poolsize);
	sp_bits = lp_bits - BITS_FROM_SMALL_TO_LARGE_PAGE;

	num_buckets = max_bucket(lp_bits);

	header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

	memset(head, 0, header_size);
	for (i = 0; i < num_buckets; i++) {
		unsigned long pagesize;

		if (large_page_bucket(i, sp_bits))
			pagesize = 1UL << lp_bits;
		else
			pagesize = 1UL << sp_bits;

		head->bs[i].elements_per_page
			= elements_per_page(i / INTER_BUCKET_SPACE,
					    bucket_to_size(i),
					    pagesize);
	}

	/* They start as all large pages. */
	memset(head->pagesize, 0xFF, sizeof(head->pagesize));
	/* FIXME: small pages for last bit? */

	/* Split first page into small pages. */
	assert(header_size < (1UL << lp_bits));
	clear_bit(head->pagesize, 0);

	/* Skip over page(s) used by header, add rest to free list */
	for (i = align_up(header_size, (1 << sp_bits)) >> sp_bits;
	     i < SMALL_PAGES_PER_LARGE_PAGE;
	     i++) {
		ph = from_pgnum(head, i, sp_bits);
		ph->elements_used = 0;
		add_small_page_to_freelist(head, ph, sp_bits);
	}

	/* Add the rest of the pages as large pages. */
	i = SMALL_PAGES_PER_LARGE_PAGE;
	while ((i << sp_bits) + (1 << lp_bits) <= poolsize) {
		ph = from_pgnum(head, i, sp_bits);
		ph->elements_used = 0;
		add_large_page_to_freelist(head, ph, sp_bits);
		i += SMALL_PAGES_PER_LARGE_PAGE;
	}
}

/* A large page's worth of small pages is free: delete them from the free
 * list. */
static void del_large_from_small_free_list(struct header *head,
					   struct page_header *ph,
					   unsigned int sp_bits)
{
	unsigned long i;

	for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
		del_from_list(head, &head->small_free_list,
			      (void *)ph + (i << sp_bits),
			      sp_bits);
	}
}

static bool all_empty(struct header *head,
		      unsigned long pgnum,
		      unsigned int sp_bits)
{
	unsigned long i;

	for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
		struct page_header *ph = from_pgnum(head, pgnum + i, sp_bits);
		if (ph->elements_used)
			return false;
	}
	return true;
}

static u16 get_large_page(struct header *head, unsigned long poolsize,
			  unsigned int sp_bits)
{
	unsigned int lp_bits, i, page;

	lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

	page = pop_from_list(head, &head->large_free_list, sp_bits);
	if (likely(page))
		return page;

	/* Look for small pages to coalesce, after first large page. */
	for (i = SMALL_PAGES_PER_LARGE_PAGE;
	     i < (poolsize >> lp_bits) << BITS_FROM_SMALL_TO_LARGE_PAGE;
	     i += SMALL_PAGES_PER_LARGE_PAGE) {
		/* Already a large page? */
		if (test_bit(head->pagesize, i / SMALL_PAGES_PER_LARGE_PAGE))
			continue;
		if (all_empty(head, i, sp_bits)) {
			struct page_header *ph = from_pgnum(head, i, sp_bits);
			set_bit(head->pagesize,
				i / SMALL_PAGES_PER_LARGE_PAGE);
			del_large_from_small_free_list(head, ph, sp_bits);
			add_large_page_to_freelist(head, ph, sp_bits);
		}
	}

	return pop_from_list(head, &head->large_free_list, sp_bits);
}

/* Returns small page. */
static unsigned long break_up_large_page(struct header *head,
					 unsigned int sp_bits,
					 u16 lpage)
{
	unsigned int i;

	clear_bit(head->pagesize, lpage >> BITS_FROM_SMALL_TO_LARGE_PAGE);

	for (i = 1; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
		struct page_header *ph = from_pgnum(head, lpage + i, sp_bits);
		add_small_page_to_freelist(head, ph, sp_bits);
	}

	return lpage;
}

static u16 get_small_page(struct header *head, unsigned long poolsize,
			  unsigned int sp_bits)
{
	u16 ret;

	ret = pop_from_list(head, &head->small_free_list, sp_bits);
	if (likely(ret))
		return ret;
	ret = get_large_page(head, poolsize, sp_bits);
	if (likely(ret))
		ret = break_up_large_page(head, sp_bits, ret);
	return ret;
}

void *alloc_get(void *pool, unsigned long poolsize,
		unsigned long size, unsigned long align)
{
	struct header *head = pool;
	unsigned int bucket;
	unsigned long i;
	struct bucket_state *bs;
	struct page_header *ph;
	unsigned int sp_bits;

	if (poolsize < MIN_USEFUL_SIZE) {
		return tiny_alloc_get(pool, poolsize, size, align);
	}

	size = align_up(size, align);
	bucket = size_to_bucket(size);

	if (bucket >= max_bucket(large_page_bits(poolsize))) {
		/* FIXME: huge alloc. */
		return NULL;
	}

	bs = &head->bs[bucket];
	sp_bits = large_page_bits(poolsize) - BITS_FROM_SMALL_TO_LARGE_PAGE;

	if (!bs->page_list) {
		struct page_header *ph;

		if (large_page_bucket(bucket, sp_bits))
			bs->page_list = get_large_page(head, poolsize,
						       sp_bits);
		else
			bs->page_list = get_small_page(head, poolsize,
						       sp_bits);
		/* FIXME: Try large-aligned alloc?  Header stuffing? */
		if (unlikely(!bs->page_list))
			return NULL;
		ph = from_pgnum(head, bs->page_list, sp_bits);
		ph->bucket = bucket;
		ph->elements_used = 0;
		ph->next = ph->prev = 0;
		memset(ph->used, 0, used_size(bs->elements_per_page));
	}

	ph = from_pgnum(head, bs->page_list, sp_bits);

	i = find_free_bit(ph->used);
	set_bit(ph->used, i);
	ph->elements_used++;

	/* check if this page is now full */
	if (unlikely(ph->elements_used == bs->elements_per_page)) {
		del_from_bucket_list(head, bs, ph, sp_bits);
		add_to_bucket_full_list(head, bs, ph, sp_bits);
	}

	return (char *)ph + page_header_size(ph->bucket / INTER_BUCKET_SPACE,
					     bs->elements_per_page)
		+ i * bucket_to_size(bucket);
}

void alloc_free(void *pool, unsigned long poolsize, void *free)
{
	struct header *head = pool;
	struct bucket_state *bs;
	unsigned int sp_bits;
	unsigned long i, pgnum, pgoffset, offset = (char *)free - (char *)pool;
	bool smallpage;
	struct page_header *ph;

	if (poolsize < MIN_USEFUL_SIZE) {
		return tiny_alloc_free(pool, poolsize, free);
	}

	/* Get page header. */
	sp_bits = large_page_bits(poolsize) - BITS_FROM_SMALL_TO_LARGE_PAGE;
	pgnum = offset >> sp_bits;

	/* Big page? Round down further. */
	if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
		smallpage = false;
		pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);
	} else
		smallpage = true;

	/* Step back to page header. */
	ph = from_pgnum(head, pgnum, sp_bits);
	bs = &head->bs[ph->bucket];
	pgoffset = offset - (pgnum << sp_bits)
		- page_header_size(ph->bucket / INTER_BUCKET_SPACE,
				   bs->elements_per_page);

	/* A full page moves back to the partial list. */
	if (unlikely(ph->elements_used == bs->elements_per_page)) {
		del_from_bucket_full_list(head, bs, ph, sp_bits);
		add_to_bucket_list(head, bs, ph, sp_bits);
	}

	/* Which element are we? */
	i = pgoffset / bucket_to_size(ph->bucket);
	clear_bit(ph->used, i);
	ph->elements_used--;

	/* Last element freed?  Give the page back. */
	if (unlikely(ph->elements_used == 0)) {
		bs = &head->bs[ph->bucket];
		del_from_bucket_list(head, bs, ph, sp_bits);
		if (smallpage)
			add_small_page_to_freelist(head, ph, sp_bits);
		else
			add_large_page_to_freelist(head, ph, sp_bits);
	}
}

unsigned long alloc_size(void *pool, unsigned long poolsize, void *p)
{
	struct header *head = pool;
	unsigned int pgnum, sp_bits;
	unsigned long offset = (char *)p - (char *)pool;
	struct page_header *ph;

	if (poolsize < MIN_USEFUL_SIZE)
		return tiny_alloc_size(pool, poolsize, p);

	/* Get page header. */
	sp_bits = large_page_bits(poolsize) - BITS_FROM_SMALL_TO_LARGE_PAGE;
	pgnum = offset >> sp_bits;

	/* Big page? Round down further. */
	if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE))
		pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);

	/* Step back to page header. */
	ph = from_pgnum(head, pgnum, sp_bits);
	return bucket_to_size(ph->bucket);
}

/* Useful for gdb breakpoints. */
static bool check_fail(void)
{
	return false;
}

static unsigned long count_bits(const unsigned long bitmap[],
				unsigned long limit)
{
	unsigned long i, count = 0;

	while (limit >= BITS_PER_LONG) {
		count += popcount(bitmap[0]);
		bitmap++;
		limit -= BITS_PER_LONG;
	}

	for (i = 0; i < limit; i++)
		if (test_bit(bitmap, i))
			count++;
	return count;
}

static bool out_of_bounds(unsigned long pgnum,
			  unsigned int sp_bits,
			  unsigned long pagesize,
			  unsigned long poolsize)
{
	if (((pgnum << sp_bits) >> sp_bits) != pgnum)
		return true;

	if ((pgnum << sp_bits) > poolsize)
		return true;

	return ((pgnum << sp_bits) + pagesize > poolsize);
}

static bool check_bucket(struct header *head,
			 unsigned long poolsize,
			 unsigned long pages[],
			 struct bucket_state *bs,
			 unsigned int bindex)
{
	bool lp_bucket;
	struct page_header *ph;
	unsigned long taken, i, prev, pagesize, sp_bits, lp_bits;

	lp_bits = large_page_bits(poolsize);
	sp_bits = lp_bits - BITS_FROM_SMALL_TO_LARGE_PAGE;

	lp_bucket = large_page_bucket(bindex, sp_bits);

	pagesize = 1UL << (lp_bucket ? lp_bits : sp_bits);

	/* This many elements fit? */
	taken = page_header_size(bindex / INTER_BUCKET_SPACE,
				 bs->elements_per_page);
	taken += bucket_to_size(bindex) * bs->elements_per_page;
	if (taken > pagesize)
		return check_fail();

	/* One more wouldn't fit? */
	taken = page_header_size(bindex / INTER_BUCKET_SPACE,
				 bs->elements_per_page + 1);
	taken += bucket_to_size(bindex) * (bs->elements_per_page + 1);
	if (taken <= pagesize)
		return check_fail();

	/* Walk used list. */
	prev = 0;
	for (i = bs->page_list; i; i = ph->next) {
		/* Bad pointer? */
		if (out_of_bounds(i, sp_bits, pagesize, poolsize))
			return check_fail();
		/* Wrong size page? */
		if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
		    != lp_bucket)
			return check_fail();
		/* Large page not on boundary? */
		if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
			return check_fail();
		ph = from_pgnum(head, i, sp_bits);
		/* Linked list corrupt? */
		if (ph->prev != prev)
			return check_fail();
		/* Already seen this page? */
		if (test_bit(pages, i))
			return check_fail();
		set_bit(pages, i);
		/* Empty or full page on the partial list? */
		if (ph->elements_used == 0)
			return check_fail();
		if (ph->elements_used >= bs->elements_per_page)
			return check_fail();
		/* Used bits don't agree? */
		if (ph->elements_used != count_bits(ph->used,
						    bs->elements_per_page))
			return check_fail();
		/* Wrong bucket? */
		if (ph->bucket != bindex)
			return check_fail();
		prev = i;
	}

	/* Walk full list. */
	prev = 0;
	for (i = bs->full_list; i; i = ph->next) {
		/* Bad pointer? */
		if (out_of_bounds(i, sp_bits, pagesize, poolsize))
			return check_fail();
		/* Wrong size page? */
		if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
		    != lp_bucket)
			return check_fail();
		/* Large page not on boundary? */
		if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
			return check_fail();
		ph = from_pgnum(head, i, sp_bits);
		/* Linked list corrupt? */
		if (ph->prev != prev)
			return check_fail();
		/* Already seen this page? */
		if (test_bit(pages, i))
			return check_fail();
		set_bit(pages, i);
		/* Not actually full? */
		if (ph->elements_used != bs->elements_per_page)
			return check_fail();
		/* Used bits don't agree? */
		if (ph->elements_used != count_bits(ph->used,
						    bs->elements_per_page))
			return check_fail();
		/* Wrong bucket? */
		if (ph->bucket != bindex)
			return check_fail();
		prev = i;
	}

	return true;
}

bool alloc_check(void *pool, unsigned long poolsize)
{
	struct header *head = pool;
	unsigned long prev, i, lp_bits, sp_bits, header_size, num_buckets;
	struct page_header *ph;
	unsigned long pages[(MAX_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)
			    / BITS_PER_LONG] = { 0 };

	if (poolsize < MIN_USEFUL_SIZE)
		return tiny_alloc_check(pool, poolsize);

	lp_bits = large_page_bits(poolsize);
	sp_bits = lp_bits - BITS_FROM_SMALL_TO_LARGE_PAGE;

	num_buckets = max_bucket(lp_bits);

	header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

	/* First, set all bits taken by header. */
	for (i = 0; i < header_size; i += (1UL << sp_bits))
		set_bit(pages, i >> sp_bits);

	/* Check small page free list. */
	prev = 0;
	for (i = head->small_free_list; i; i = ph->next) {
		/* Bad pointer? */
		if (out_of_bounds(i, sp_bits, 1 << sp_bits, poolsize))
			return check_fail();
		/* Should be a small page. */
		if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
			return check_fail();
		ph = from_pgnum(head, i, sp_bits);
		/* Linked list corrupt? */
		if (ph->prev != prev)
			return check_fail();
		/* Already seen this page? */
		if (test_bit(pages, i))
			return check_fail();
		set_bit(pages, i);
		prev = i;
	}

	/* Check large page free list. */
	prev = 0;
	for (i = head->large_free_list; i; i = ph->next) {
		/* Bad pointer? */
		if (out_of_bounds(i, sp_bits, 1 << lp_bits, poolsize))
			return check_fail();
		/* Not large page? */
		if (!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
			return check_fail();
		/* Not page boundary? */
		if ((i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
			return check_fail();
		ph = from_pgnum(head, i, sp_bits);
		/* Linked list corrupt? */
		if (ph->prev != prev)
			return check_fail();
		/* Already seen this page? */
		if (test_bit(pages, i))
			return check_fail();
		set_bit(pages, i);
		prev = i;
	}

	/* Check the buckets. */
	for (i = 0; i < max_bucket(lp_bits); i++) {
		struct bucket_state *bs = &head->bs[i];

		if (!check_bucket(head, poolsize, pages, bs, i))
			return false;
	}

	/* Make sure every page accounted for. */
	for (i = 0; i < poolsize >> sp_bits; i++) {
		if (!test_bit(pages, i))
			return check_fail();
		if (test_bit(head->pagesize,
			     i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
			/* Large page, skip rest. */
			i += SMALL_PAGES_PER_LARGE_PAGE - 1;
		}
	}

	return true;
}

static unsigned long print_overhead(FILE *out, const char *desc,
				    unsigned long bytes,
				    unsigned long poolsize)
{
	fprintf(out, "Overhead (%s): %lu bytes (%.3g%%)\n",
		desc, bytes, 100.0 * bytes / poolsize);
	return bytes;
}

static unsigned long count_list(struct header *head,
				u16 pgnum,
				unsigned int sp_bits,
				unsigned long *total_elems)
{
	struct page_header *p;
	unsigned long ret = 0;

	while (pgnum) {
		p = from_pgnum(head, pgnum, sp_bits);
		if (total_elems)
			(*total_elems) += p->elements_used;
		ret++;
		pgnum = p->next;
	}
	return ret;
}

static unsigned long visualize_bucket(FILE *out, struct header *head,
				      unsigned int bucket,
				      unsigned long poolsize,
				      unsigned int sp_bits)
{
	unsigned long num_full, num_partial, num_pages, page_size,
		elems, hdr_min, hdr_size, elems_per_page, overhead = 0;

	elems_per_page = head->bs[bucket].elements_per_page;

	/* If we used byte-based bitmaps, we could get pg hdr to: */
	hdr_min = sizeof(struct page_header)
		- sizeof(((struct page_header *)0)->used)
		+ align_up(elems_per_page, CHAR_BIT) / CHAR_BIT;
	hdr_size = page_header_size(bucket / INTER_BUCKET_SPACE,
				    elems_per_page);

	elems = 0;
	num_full = count_list(head, head->bs[bucket].full_list, sp_bits,
			      &elems);
	num_partial = count_list(head, head->bs[bucket].page_list, sp_bits,
				 &elems);
	num_pages = num_full + num_partial;
	if (!num_pages)
		return 0;

	fprintf(out, "Bucket %u (%lu bytes):"
		" %lu full, %lu partial = %lu elements\n",
		bucket, bucket_to_size(bucket), num_full, num_partial, elems);
	/* Strict requirement of page header size. */
	overhead += print_overhead(out, "page headers",
				   hdr_min * num_pages, poolsize);
	/* Gap between minimal page header and actual start. */
	overhead += print_overhead(out, "page post-header alignments",
				   (hdr_size - hdr_min) * num_pages, poolsize);
	/* Between last element and end of page. */
	page_size = (1 << sp_bits);
	if (large_page_bucket(bucket, sp_bits))
		page_size <<= BITS_FROM_SMALL_TO_LARGE_PAGE;
	overhead += print_overhead(out, "page tails",
				   (page_size - (hdr_size
						 + (elems_per_page
						    * bucket_to_size(bucket))))
				   * num_pages, poolsize);
	return overhead;
}

static void tiny_alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
{
}

void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
{
	struct header *head = pool;
	unsigned long i, lp_bits, sp_bits, header_size, num_buckets, count,
		overhead = 0;

	fprintf(out, "Pool %p size %lu: (%s allocator)\n", pool, poolsize,
		poolsize < MIN_USEFUL_SIZE ? "tiny" : "standard");

	if (poolsize < MIN_USEFUL_SIZE) {
		tiny_alloc_visualize(out, pool, poolsize);
		return;
	}

	lp_bits = large_page_bits(poolsize);
	sp_bits = lp_bits - BITS_FROM_SMALL_TO_LARGE_PAGE;

	num_buckets = max_bucket(lp_bits);
	header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

	fprintf(out, "Large page size %lu, small page size %lu.\n",
		1UL << lp_bits, 1UL << sp_bits);
	overhead += print_overhead(out, "unused pool tail",
				   poolsize % (1 << lp_bits), poolsize);
	fprintf(out, "Main header %lu bytes (%lu small pages).\n",
		header_size, align_up(header_size, 1 << sp_bits) >> sp_bits);
	overhead += print_overhead(out, "partial header page",
				   align_up(header_size, 1 << sp_bits)
				   - header_size, poolsize);
	/* Total large pages. */
	i = count_bits(head->pagesize, poolsize >> lp_bits);
	/* Used pages. */
	count = i - count_list(head, head->large_free_list, sp_bits, NULL);
	fprintf(out, "%lu/%lu large pages used (%.3g%%)\n",
		count, i, count ? 100.0 * count / i : 0.0);

	/* Total small pages. */
	i = (poolsize >> lp_bits) - i;
	i <<= BITS_FROM_SMALL_TO_LARGE_PAGE;
	/* Used pages. */
	count = i - count_list(head, head->small_free_list, sp_bits, NULL);
	fprintf(out, "%lu/%lu small pages used (%.3g%%)\n",
		count, i, count ? 100.0 * count / i : 0.0);

	/* Summary of each bucket. */
	fprintf(out, "%lu buckets:\n", num_buckets);
	for (i = 0; i < num_buckets; i++)
		overhead += visualize_bucket(out, head, i, poolsize, sp_bits);

	print_overhead(out, "total", overhead, poolsize);
}

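/* Minimal usage sketch (illustrative only; the pool size and the
 * ALLOC_EXAMPLE guard are assumptions, not part of the original file). */
#ifdef ALLOC_EXAMPLE
int main(void)
{
	static char pool[2 * 1024 * 1024];	/* >= MIN_USEFUL_SIZE */
	void *p;

	alloc_init(pool, sizeof(pool));
	p = alloc_get(pool, sizeof(pool), 100, 16);
	if (!p)
		return 1;
	/* Rounded up to its bucket size (112 bytes here). */
	printf("got %lu usable bytes\n", alloc_size(pool, sizeof(pool), p));
	alloc_free(pool, sizeof(pool), p);
	return alloc_check(pool, sizeof(pool)) ? 0 : 1;
}
#endif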