diff --git a/ccan/alloc/alloc.c b/ccan/alloc/alloc.c
index 12c780eb154daf6ec80300432437c6c0fcd0ea40..e9aaf3ddea8c7c7727f6a65c5ccd4a516209307c 100644
--- a/ccan/alloc/alloc.c
+++ b/ccan/alloc/alloc.c
@@ -5,9 +5,13 @@
 #include <assert.h>
 #include <stdlib.h>
 #include "alloc.h"
+#include "bitops.h"
+#include "tiny.h"
 #include <ccan/build_assert/build_assert.h>
 #include <ccan/likely/likely.h>
+#include <ccan/alignof/alignof.h>
 #include <ccan/short_types/short_types.h>
+#include <ccan/compiler/compiler.h>
 #include "config.h"
 
 /*
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
-#if 0 /* Until we have the tiny allocator working, go down to 1 MB */
-
 /* We divide the pool into this many large pages (nearest power of 2) */
-#define MAX_LARGE_PAGES (1024UL)
+#define MAX_LARGE_PAGES (256UL)
 
 /* 32 small pages == 1 large page. */
 #define BITS_FROM_SMALL_TO_LARGE_PAGE 5
 
-#else
-
-#define MAX_LARGE_PAGES (128UL)
-#define BITS_FROM_SMALL_TO_LARGE_PAGE 4
-
-#endif
-
 #define MAX_SMALL_PAGES (MAX_LARGE_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)
 
-/* Smallest pool size for this scheme: 512-byte small pages.  That's
- * 3/5% overhead for 32/64 bit. */
-#define MIN_USEFUL_SIZE (MAX_SMALL_PAGES * 512)
+/* Smallest pool size for this scheme: 128-byte small pages.  That's
+ * 9/13% overhead for 32/64 bit. */
+#define MIN_USEFUL_SIZE (MAX_SMALL_PAGES * 128)
 
 /* Every 4 buckets, we jump up a power of 2. ...8 10 12 14 16 20 24 28 32... */
 #define INTER_BUCKET_SPACE 4
@@ -66,7 +61,7 @@
 #define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
 
 struct bucket_state {
-       unsigned long elements_per_page;
+       u32 elements_per_page;
        u16 page_list;
        u16 full_list;
 };
@@ -79,158 +74,26 @@ struct header {
        u16 small_free_list;
        u16 large_free_list;
 
+       /* List of huge allocs. */
+       unsigned long huge;
+
        /* This is less defined: we have two buckets for each power of 2 */
        struct bucket_state bs[1];
 };
 
+struct huge_alloc {
+       unsigned long next, prev;
+       unsigned long off, len;
+};
+
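+/* Editorial sketch (not part of the patch): huge-list links are byte
+ * offsets from the pool header rather than pointers, so a pool mapped
+ * at a different address (e.g. a shared file) stays valid.  Offset 0
+ * always lands on the header itself, so 0 doubles as the NULL sentinel.
+ * A hypothetical accessor for the conversion:
+ *
+ *	static inline struct huge_alloc *ha_from_off(struct header *head,
+ *						     unsigned long off)
+ *	{
+ *		return (void *)((char *)head + off);
+ *	}
+ */
+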
 struct page_header {
        u16 next, prev;
-       u32 elements_used;
-       /* FIXME: Pack this in somewhere... */
-       u8 bucket;
+       /* FIXME: We can just count all-0 and all-1 used[] elements. */
+       unsigned elements_used : 25;
+       unsigned bucket : 7;
        unsigned long used[1]; /* One bit per element. */
 };
 
-/* 2 bit for every byte to allocate. */
-static void tiny_alloc_init(void *pool, unsigned long poolsize)
-{
-/* FIXME */
-}
-
-static void *tiny_alloc_get(void *pool, unsigned long poolsize,
-                           unsigned long size, unsigned long align)
-{
-/* FIXME */
-       return NULL;
-}
-
-static void tiny_alloc_free(void *pool, unsigned long poolsize, void *free)
-{
-/* FIXME */
-}
-
-static unsigned long tiny_alloc_size(void *pool, unsigned long poolsize,
-                                     void *p)
-{
-/* FIXME */
-       return 0;
-}
-
-static bool tiny_alloc_check(void *pool, unsigned long poolsize)
-{
-/* FIXME */
-       return true;
-}
-
-static unsigned int fls(unsigned long val)
-{
-#if HAVE_BUILTIN_CLZL
-       /* This is significantly faster! */
-       return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
-#else
-       unsigned int r = 32;
-
-       if (!val)
-               return 0;
-       if (!(val & 0xffff0000u)) {
-               val <<= 16;
-               r -= 16;
-       }
-       if (!(val & 0xff000000u)) {
-               val <<= 8;
-               r -= 8;
-       }
-       if (!(val & 0xf0000000u)) {
-               val <<= 4;
-               r -= 4;
-       }
-       if (!(val & 0xc0000000u)) {
-               val <<= 2;
-               r -= 2;
-       }
-       if (!(val & 0x80000000u)) {
-               val <<= 1;
-               r -= 1;
-       }
-       return r;
-#endif
-}
-
-/* FIXME: Move to bitops. */
-static unsigned int ffsl(unsigned long val)
-{
-#if HAVE_BUILTIN_FFSL
-       /* This is significantly faster! */
-       return __builtin_ffsl(val);
-#else
-       unsigned int r = 1;
-
-       if (!val)
-               return 0;
-       if (sizeof(long) == sizeof(u64)) {
-               if (!(val & 0xffffffff)) {
-                       /* Workaround gcc warning on 32-bit:
-                          error: right shift count >= width of type */
-                       u64 tmp = val;
-                       tmp >>= 32;
-                       val = tmp;
-                       r += 32;
-               }
-       }
-       if (!(val & 0xffff)) {
-               val >>= 16;
-               r += 16;
-       }
-       if (!(val & 0xff)) {
-               val >>= 8;
-               r += 8;
-       }
-       if (!(val & 0xf)) {
-               val >>= 4;
-               r += 4;
-       }
-       if (!(val & 3)) {
-               val >>= 2;
-               r += 2;
-       }
-       if (!(val & 1)) {
-               val >>= 1;
-               r += 1;
-       }
-       return r;
-#endif
-}
-
-static unsigned int popcount(unsigned long val)
-{
-#if HAVE_BUILTIN_POPCOUNTL
-       return __builtin_popcountl(val);
-#else
-       if (sizeof(long) == sizeof(u64)) {
-               u64 v = val;
-               v = (v & 0x5555555555555555ULL)
-                       + ((v >> 1) & 0x5555555555555555ULL);
-               v = (v & 0x3333333333333333ULL)
-                       + ((v >> 2) & 0x3333333333333333ULL);
-               v = (v & 0x0F0F0F0F0F0F0F0FULL)
-                       + ((v >> 4) & 0x0F0F0F0F0F0F0F0FULL);
-               v = (v & 0x00FF00FF00FF00FFULL)
-                       + ((v >> 8) & 0x00FF00FF00FF00FFULL);
-               v = (v & 0x0000FFFF0000FFFFULL)
-                       + ((v >> 16) & 0x0000FFFF0000FFFFULL);
-               v = (v & 0x00000000FFFFFFFFULL)
-                       + ((v >> 32) & 0x00000000FFFFFFFFULL);
-               return v;
-       }
-       val = (val & 0x55555555ULL) + ((val >> 1) & 0x55555555ULL);
-       val = (val & 0x33333333ULL) + ((val >> 2) & 0x33333333ULL);
-       val = (val & 0x0F0F0F0FULL) + ((val >> 4) & 0x0F0F0F0FULL);
-       val = (val & 0x00FF00FFULL) + ((val >> 8) & 0x00FF00FFULL);
-       val = (val & 0x0000FFFFULL) + ((val >> 16) & 0x0000FFFFULL);
-       return val;
-#endif
-}
-
 /*
  * Every 4 buckets, the size doubles.
  * Between buckets, sizes increase linearly.
@@ -245,7 +108,7 @@ static unsigned int popcount(unsigned long val)
  */
 static unsigned long bucket_to_size(unsigned int bucket)
 {
-       unsigned long base = 1 << (bucket / INTER_BUCKET_SPACE);
+       unsigned long base = 1UL << (bucket / INTER_BUCKET_SPACE);
        return base + ((bucket % INTER_BUCKET_SPACE)
                       << (bucket / INTER_BUCKET_SPACE))
                / INTER_BUCKET_SPACE;
@@ -264,19 +127,14 @@ static unsigned int size_to_bucket(unsigned long size)
        unsigned int base = fls(size/2);
        unsigned long overshoot;
 
-       overshoot = size - (1 << base);
+       overshoot = size - (1UL << base);
        return base * INTER_BUCKET_SPACE
-               + ((overshoot * INTER_BUCKET_SPACE + (1 << base)-1) >> base);
+               + ((overshoot * INTER_BUCKET_SPACE + (1UL << base)-1) >> base);
 }
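+
+/* Worked example (editorial, not part of the patch): size 9 gives
+ * fls(4) = 3, overshoot = 1, bucket = 3*4 + ((1*4 + 7) >> 3) = 13, and
+ * bucket_to_size(13) = 8 + (1 << 3)/4 = 10.  In general the round trip
+ * satisfies bucket_to_size(size_to_bucket(s)) >= s; a quick self-check
+ * one could compile separately:
+ *
+ *	for (unsigned long s = 1; s < 65536; s++)
+ *		assert(bucket_to_size(size_to_bucket(s)) >= s);
+ */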
 
 static unsigned int small_page_bits(unsigned long poolsize)
 {
-       return fls(poolsize / MAX_SMALL_PAGES / 2);
-}
-
-static unsigned long align_up(unsigned long x, unsigned long align)
-{
-       return (x + align - 1) & ~(align - 1);
+       return fls(poolsize / MAX_SMALL_PAGES - 1);
 }
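+
+/* Worked example (editorial): MAX_SMALL_PAGES = 256UL << 5 = 8192, so
+ * the minimum 1 MB pool gives fls(1048576 / 8192 - 1) = fls(127) = 7,
+ * i.e. 128-byte small pages, and a 2 MB pool gives fls(255) = 8, i.e.
+ * 256-byte pages:
+ *
+ *	assert(small_page_bits(1UL << 20) == 7);
+ *	assert(small_page_bits(1UL << 21) == 8);
+ */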
 
 static struct page_header *from_pgnum(struct header *head,
@@ -309,7 +167,7 @@ static unsigned long page_header_size(unsigned int align_bits,
        size = sizeof(struct page_header)
                - sizeof(((struct page_header *)0)->used)
                + used_size(num_elements);
-       return align_up(size, 1 << align_bits);
+       return align_up(size, 1UL << align_bits);
 }
 
 static void add_to_list(struct header *head,
@@ -358,6 +216,36 @@ static u16 pop_from_list(struct header *head,
        return h;
 }
 
+static void add_to_huge_list(struct header *head, struct huge_alloc *ha)
+{
+       unsigned long h = head->huge;
+       unsigned long offset = (char *)ha - (char *)head;
+
+       ha->next = h;
+       if (h) {
+               struct huge_alloc *prev = (void *)((char *)head + h);
+               assert(prev->prev == 0);
+               prev->prev = offset;
+       }
+       head->huge = offset;
+       ha->prev = 0;
+}
+
+static void del_from_huge(struct header *head, struct huge_alloc *ha)
+{
+       /* Front of list? */
+       if (ha->prev == 0) {
+               head->huge = ha->next;
+       } else {
+               struct huge_alloc *prev = (void *)((char *)head + ha->prev);
+               prev->next = ha->next;
+       }
+       if (ha->next != 0) {
+               struct huge_alloc *next = (void *)((char *)head + ha->next);
+               next->prev = ha->prev;
+       }
+}
+
 static void add_small_page_to_freelist(struct header *head,
                                       struct page_header *ph,
                                       unsigned int sp_bits)
@@ -406,17 +294,17 @@ static void add_to_bucket_full_list(struct header *head,
 
 static void clear_bit(unsigned long bitmap[], unsigned int off)
 {
-       bitmap[off / BITS_PER_LONG] &= ~(1 << (off % BITS_PER_LONG));
+       bitmap[off / BITS_PER_LONG] &= ~(1UL << (off % BITS_PER_LONG));
 }
 
 static bool test_bit(const unsigned long bitmap[], unsigned int off)
 {
-       return bitmap[off / BITS_PER_LONG] & (1 << (off % BITS_PER_LONG));
+       return bitmap[off / BITS_PER_LONG] & (1UL << (off % BITS_PER_LONG));
 }
 
 static void set_bit(unsigned long bitmap[], unsigned int off)
 {
-       bitmap[off / BITS_PER_LONG] |= (1 << (off % BITS_PER_LONG));
+       bitmap[off / BITS_PER_LONG] |= (1UL << (off % BITS_PER_LONG));
 }
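+
+/* Editorial note on the 1UL fix above: a plain 1 is a 32-bit int, so
+ * "1 << (off % BITS_PER_LONG)" is undefined once off % 64 >= 32, and
+ * even 1 << 31 is INT_MIN, which sign-extends when widened:
+ *
+ *	unsigned long x = 0;
+ *	x |= (1 << 31);		-> x == 0xffffffff80000000 on LP64
+ *	x = 0;
+ *	x |= (1UL << 31);	-> sets only bit 31, as intended
+ */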
 
 /* There must be a bit to be found. */
@@ -436,7 +324,7 @@ static unsigned long elements_per_page(unsigned long align_bits,
        unsigned long num, overhead;
 
        /* First approximation: no extra room for bitmap. */
-       overhead = align_up(sizeof(struct page_header), 1 << align_bits);
+       overhead = align_up(sizeof(struct page_header), 1UL << align_bits);
        num = (psize - overhead) / esize;
 
        while (page_header_size(align_bits, num) + esize * num > psize)
@@ -502,11 +390,11 @@ void alloc_init(void *pool, unsigned long poolsize)
        /* FIXME: small pages for last bit? */
 
        /* Split first page into small pages. */
-       assert(header_size << (1UL << lp_bits));
+       assert(header_size < (1UL << lp_bits));
        clear_bit(head->pagesize, 0);
 
        /* Skip over page(s) used by header, add rest to free list */
-       for (i = align_up(header_size, (1 << sp_bits)) >> sp_bits;
+       for (i = align_up(header_size, (1UL << sp_bits)) >> sp_bits;
             i < SMALL_PAGES_PER_LARGE_PAGE;
             i++) {
                ph = from_pgnum(head, i, sp_bits);
@@ -516,7 +404,8 @@ void alloc_init(void *pool, unsigned long poolsize)
 
        /* Add the rest of the pages as large pages. */
        i = SMALL_PAGES_PER_LARGE_PAGE;
-       while ((i << sp_bits) + (1 << lp_bits) <= poolsize) {
+       while ((i << sp_bits) + (1UL << lp_bits) <= poolsize) {
+               assert(i < MAX_SMALL_PAGES);
                ph = from_pgnum(head, i, sp_bits);
                ph->elements_used = 0;
                add_large_page_to_freelist(head, ph, sp_bits);
@@ -552,16 +441,11 @@ static bool all_empty(struct header *head,
        return true;
 }
 
-static u16 get_large_page(struct header *head, unsigned long poolsize,
-                         unsigned int sp_bits)
+static void recombine_small_pages(struct header *head, unsigned long poolsize,
+                                 unsigned int sp_bits)
 {
-       unsigned int lp_bits, i, page;
-
-       lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
-
-       page = pop_from_list(head, &head->large_free_list, sp_bits);
-       if (likely(page))
-               return page;
+       unsigned long i;
+       unsigned int lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
 
        /* Look for small pages to coalesce, after first large page. */
        for (i = SMALL_PAGES_PER_LARGE_PAGE;
@@ -578,7 +462,19 @@ static u16 get_large_page(struct header *head, unsigned long poolsize,
                        add_large_page_to_freelist(head, ph, sp_bits);
                }
        }
-                       
+}
+
+static u16 get_large_page(struct header *head, unsigned long poolsize,
+                         unsigned int sp_bits)
+{
+       unsigned int page;
+
+       page = pop_from_list(head, &head->large_free_list, sp_bits);
+       if (likely(page))
+               return page;
+
+       recombine_small_pages(head, poolsize, sp_bits);
+
        return pop_from_list(head, &head->large_free_list, sp_bits);
 }
 
@@ -593,6 +489,8 @@ static unsigned long break_up_large_page(struct header *head,
 
        for (i = 1; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
                struct page_header *ph = from_pgnum(head, lpage + i, sp_bits);
+               /* Initialize this: huge_alloc reads it. */
+               ph->elements_used = 0;
                add_small_page_to_freelist(head, ph, sp_bits);
        }
 
@@ -613,6 +511,193 @@ static u16 get_small_page(struct header *head, unsigned long poolsize,
        return ret;
 }
 
+static bool huge_allocated(struct header *head, unsigned long offset)
+{
+       unsigned long i;
+       struct huge_alloc *ha;
+
+       for (i = head->huge; i; i = ha->next) {
+               ha = (void *)((char *)head + i);
+               if (ha->off <= offset && ha->off + ha->len > offset)
+                       return true;
+       }
+       return false;
+}
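+
+/* Editorial aside: because off and len are unsigned, the half-open
+ * containment test above is equivalent to the single wrap-safe check:
+ *
+ *	if (offset - ha->off < ha->len)
+ *		return true;
+ */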
+
+/* They want something really big.  Aim for contiguous pages (slow). */
+static COLD void *huge_alloc(void *pool, unsigned long poolsize,
+                            unsigned long size, unsigned long align)
+{
+       struct header *head = pool;
+       struct huge_alloc *ha;
+       unsigned long i, sp_bits, lp_bits, num, header_size;
+
+       sp_bits = small_page_bits(poolsize);
+       lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
+
+       /* Allocate tracking structure optimistically. */
+       ha = alloc_get(pool, poolsize, sizeof(*ha), ALIGNOF(*ha));
+       if (!ha)
+               return NULL;
+
+       /* First search for contiguous small pages... */
+       header_size = sizeof(*head) + sizeof(head->bs) * (max_bucket(lp_bits)-1);
+
+       num = 0;
+       for (i = (header_size + (1UL << sp_bits) - 1) >> sp_bits;
+            i << sp_bits < poolsize;
+            i++) {
+               struct page_header *pg;
+               unsigned long off = (i << sp_bits);
+
+               /* Skip over large pages. */
+               if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
+                       i += (1UL << BITS_FROM_SMALL_TO_LARGE_PAGE)-1;
+                       continue;
+               }
+
+               /* Does this page meet alignment requirements? */
+               if (!num && off % align != 0)
+                       continue;
+
+               /* FIXME: This makes us O(n^2). */
+               if (huge_allocated(head, off)) {
+                       num = 0;
+                       continue;
+               }
+
+               pg = (struct page_header *)((char *)head + off);
+               if (pg->elements_used) {
+                       num = 0;
+                       continue;
+               }
+
+               num++;
+               if (num << sp_bits >= size) {
+                       unsigned long pgnum;
+
+                       /* Remove from free list. */
+                       for (pgnum = i; pgnum > i - num; pgnum--) {
+                               pg = from_pgnum(head, pgnum, sp_bits);
+                               del_from_list(head,
+                                             &head->small_free_list,
+                                             pg, sp_bits);
+                       }
+                       ha->off = (i - num + 1) << sp_bits;
+                       ha->len = num << sp_bits;
+                       goto done;
+               }
+       }
+
+       /* Now search for large pages... */
+       recombine_small_pages(head, poolsize, sp_bits);
+
+       num = 0;
+       for (i = (header_size + (1UL << lp_bits) - 1) >> lp_bits;
+            (i << lp_bits) < poolsize; i++) {
+               struct page_header *pg;
+               unsigned long off = (i << lp_bits);
+
+               /* Ignore small pages. */
+               if (!test_bit(head->pagesize, i))
+                       continue;
+
+               /* Does this page meet alignment requirements? */
+               if (!num && off % align != 0)
+                       continue;
+
+               /* FIXME: This makes us O(n^2). */
+               if (huge_allocated(head, off)) {
+                       num = 0;
+                       continue;
+               }
+
+               pg = (struct page_header *)((char *)head + off);
+               if (pg->elements_used) {
+                       num = 0;
+                       continue;
+               }
+
+               num++;
+               if (num << lp_bits >= size) {
+                       unsigned long pgnum;
+
+                       /* Remove from free list. */
+                       for (pgnum = i; pgnum > i - num; pgnum--) {
+                               pg = from_pgnum(head, pgnum, lp_bits);
+                               del_from_list(head,
+                                             &head->large_free_list,
+                                             pg, sp_bits);
+                       }
+                       ha->off = (i - num + 1) << lp_bits;
+                       ha->len = num << lp_bits;
+                       goto done;
+               }
+       }
+
+       /* Unable to satisfy: free huge alloc structure. */
+       alloc_free(pool, poolsize, ha);
+       return NULL;
+
+done:
+       add_to_huge_list(pool, ha);
+       return (char *)pool + ha->off;
+}
+
+static COLD void
+huge_free(struct header *head, unsigned long poolsize, void *free)
+{
+       unsigned long i, off, pgnum, free_off = (char *)free - (char *)head;
+       unsigned int sp_bits, lp_bits;
+       struct huge_alloc *ha;
+
+       for (i = head->huge; i; i = ha->next) {
+               ha = (void *)((char *)head + i);
+               if (free_off == ha->off)
+                       break;
+       }
+       assert(i);
+
+       /* Free up all the pages, delete and free ha */
+       sp_bits = small_page_bits(poolsize);
+       lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
+       pgnum = free_off >> sp_bits;
+
+       if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
+               for (off = ha->off;
+                    off < ha->off + ha->len;
+                    off += 1UL << lp_bits) {
+                       add_large_page_to_freelist(head,
+                                                  (void *)((char *)head + off),
+                                                  sp_bits);
+               }
+       } else {
+               for (off = ha->off;
+                    off < ha->off + ha->len;
+                    off += 1UL << sp_bits) {
+                       add_small_page_to_freelist(head,
+                                                  (void *)((char *)head + off),
+                                                  sp_bits);
+               }
+       }
+       del_from_huge(head, ha);
+       alloc_free(head, poolsize, ha);
+}
+
+static COLD unsigned long huge_size(struct header *head, void *p)
+{
+       unsigned long i, off = (char *)p - (char *)head;
+       struct huge_alloc *ha;
+
+       for (i = head->huge; i; i = ha->next) {
+               ha = (void *)((char *)head + i);
+               if (off == ha->off) {
+                       return ha->len;
+               }
+       }
+       abort();
+}
+
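+/* Usage sketch (editorial, not part of the patch): with a pool of at
+ * least MIN_USEFUL_SIZE (1 MB here), a request bigger than the largest
+ * bucket is routed to huge_alloc() by alloc_get() below:
+ *
+ *	static char pool[1 << 20];
+ *
+ *	alloc_init(pool, sizeof(pool));
+ *	void *p = alloc_get(pool, sizeof(pool), 512 * 1024, 16);
+ *	assert(p && alloc_size(pool, sizeof(pool), p) >= 512 * 1024);
+ *	alloc_free(pool, sizeof(pool), p);
+ *	assert(alloc_check(pool, sizeof(pool)));
+ */
+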
 void *alloc_get(void *pool, unsigned long poolsize,
                unsigned long size, unsigned long align)
 {
@@ -635,8 +720,7 @@ void *alloc_get(void *pool, unsigned long poolsize,
        sp_bits = small_page_bits(poolsize);
 
        if (bucket >= max_bucket(sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE)) {
-               /* FIXME: huge alloc. */
-               return NULL;
+               return huge_alloc(pool, poolsize, size, align);
        }
 
        bs = &head->bs[bucket];
@@ -703,6 +787,11 @@ void alloc_free(void *pool, unsigned long poolsize, void *free)
 
        /* Step back to page header. */
        ph = from_pgnum(head, pgnum, sp_bits);
+       if ((void *)ph == free) {
+               huge_free(head, poolsize, free);
+               return;
+       }
+
        bs = &head->bs[ph->bucket];
        pgoffset = offset - (pgnum << sp_bits)
                - page_header_size(ph->bucket / INTER_BUCKET_SPACE,
@@ -748,6 +837,9 @@ unsigned long alloc_size(void *pool, unsigned long poolsize, void *p)
 
        /* Step back to page header. */
        ph = from_pgnum(head, pgnum, sp_bits);
+       if ((void *)ph == p)
+               return huge_size(head, p);
+
        return bucket_to_size(ph->bucket);
 }
 
@@ -895,6 +987,7 @@ bool alloc_check(void *pool, unsigned long poolsize)
        struct header *head = pool;
        unsigned long prev, i, lp_bits, sp_bits, header_size, num_buckets;
        struct page_header *ph;
+       struct huge_alloc *ha;
        unsigned long pages[MAX_SMALL_PAGES / BITS_PER_LONG] = { 0 };
 
        if (poolsize < MIN_USEFUL_SIZE)
@@ -915,7 +1008,7 @@ bool alloc_check(void *pool, unsigned long poolsize)
        prev = 0;
        for (i = head->small_free_list; i; i = ph->next) {
                /* Bad pointer? */
-               if (out_of_bounds(i, sp_bits, 1 << sp_bits, poolsize))
+               if (out_of_bounds(i, sp_bits, 1UL << sp_bits, poolsize))
                        return check_fail();
                /* Large page? */
                if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
@@ -935,7 +1028,7 @@ bool alloc_check(void *pool, unsigned long poolsize)
        prev = 0;
        for (i = head->large_free_list; i; i = ph->next) {
                /* Bad pointer? */
-               if (out_of_bounds(i, sp_bits, 1 << lp_bits, poolsize))
+               if (out_of_bounds(i, sp_bits, 1UL << lp_bits, poolsize))
                        return check_fail();
                /* Not large page? */
                if (!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
@@ -962,6 +1055,46 @@ bool alloc_check(void *pool, unsigned long poolsize)
                        return false;
        }
 
+       /* Check the huge alloc list. */
+       prev = 0;
+       for (i = head->huge; i; i = ha->next) {
+               unsigned long pgbits, j;
+
+               /* Bad pointer? */
+               if (i >= poolsize || i + sizeof(*ha) > poolsize)
+                       return check_fail();
+               ha = (void *)((char *)head + i);
+
+               /* Check contents of ha. */
+               if (ha->off > poolsize || ha->off + ha->len > poolsize)
+                       return check_fail();
+
+               /* Large or small page? */
+               pgbits = test_bit(head->pagesize, ha->off >> lp_bits)
+                       ? lp_bits : sp_bits;
+
+               /* Not page boundary? */
+               if ((ha->off % (1UL << pgbits)) != 0)
+                       return check_fail();
+
+               /* Not page length? */
+               if ((ha->len % (1UL << pgbits)) != 0)
+                       return check_fail();
+
+               /* Linked list corrupt? */
+               if (ha->prev != prev)
+                       return check_fail();
+
+               for (j = ha->off; j < ha->off + ha->len; j += (1UL<<sp_bits)) {
+                       /* Already seen this page? */
+                       if (test_bit(pages, j >> sp_bits))
+                               return check_fail();
+                       set_bit(pages, j >> sp_bits);
+               }
+
+               prev = i;
+       }
+               
        /* Make sure every page accounted for. */
        for (i = 0; i < poolsize >> sp_bits; i++) {
                if (!test_bit(pages, i))
@@ -1039,7 +1172,7 @@ static unsigned long visualize_bucket(FILE *out, struct header *head,
        overhead += print_overhead(out, "page post-header alignments",
                                   (hdr_size - hdr_min) * num_pages, poolsize);
        /* Between last element and end of page. */
-       page_size = (1 << sp_bits);
+       page_size = (1UL << sp_bits);
        if (large_page_bucket(bucket, sp_bits))
                page_size <<= BITS_FROM_SMALL_TO_LARGE_PAGE;
 
@@ -1051,10 +1184,6 @@ static unsigned long visualize_bucket(FILE *out, struct header *head,
        return overhead;
 }
 
-static void tiny_alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
-{
-}
-
 void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
 {
        struct header *head = pool;
@@ -1078,11 +1207,11 @@ void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
        fprintf(out, "Large page size %lu, small page size %lu.\n",
                1UL << lp_bits, 1UL << sp_bits);
        overhead += print_overhead(out, "unused pool tail",
-                                  poolsize % (1 << lp_bits), poolsize);
+                                  poolsize % (1UL << lp_bits), poolsize);
        fprintf(out, "Main header %lu bytes (%lu small pages).\n",
-               header_size, align_up(header_size, 1 << sp_bits) >> sp_bits);
+               header_size, align_up(header_size, 1UL << sp_bits) >> sp_bits);
        overhead += print_overhead(out, "partial header page",
-                                  align_up(header_size, 1 << sp_bits)
+                                  align_up(header_size, 1UL << sp_bits)
                                   - header_size, poolsize);
        /* Total large pages. */
        i = count_bits(head->pagesize, poolsize >> lp_bits);
@@ -1092,7 +1221,7 @@ void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
                count, i, count ? 100.0 * count / i : 0.0);
 
        /* Total small pages. */
-       i = (poolsize >> lp_bits) - i;
+       i = ((poolsize >> lp_bits) - i) << BITS_FROM_SMALL_TO_LARGE_PAGE;
        /* Used pages */
        count = i - count_list(head, head->small_free_list, sp_bits, NULL);
        fprintf(out, "%lu/%lu small pages used (%.3g%%)\n",