alloc: move into antithread/alloc.
author: Rusty Russell <rusty@rustcorp.com.au>
Thu, 22 Nov 2012 01:12:11 +0000 (11:42 +1030)
committer: Rusty Russell <rusty@rustcorp.com.au>
Thu, 22 Nov 2012 01:12:11 +0000 (11:42 +1030)
Our first nested module; easy because no one else relies on it.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
30 files changed:
.gitignore
Makefile
Makefile-ccan
ccan/.gitignore [new file with mode: 0644]
ccan/alloc/LICENSE [deleted symlink]
ccan/alloc/_info [deleted file]
ccan/alloc/alloc.c [deleted file]
ccan/alloc/alloc.h [deleted file]
ccan/alloc/bitops.c [deleted file]
ccan/alloc/bitops.h [deleted file]
ccan/alloc/test/run-corrupt.c [deleted file]
ccan/alloc/test/run-testsize.c [deleted file]
ccan/alloc/test/run-tiny-encode.c [deleted file]
ccan/alloc/test/run.c [deleted file]
ccan/alloc/tiny.c [deleted file]
ccan/alloc/tiny.h [deleted file]
ccan/antithread/_info
ccan/antithread/alloc/LICENSE [new symlink]
ccan/antithread/alloc/_info [new file with mode: 0644]
ccan/antithread/alloc/alloc.c [new file with mode: 0644]
ccan/antithread/alloc/alloc.h [new file with mode: 0644]
ccan/antithread/alloc/bitops.c [new file with mode: 0644]
ccan/antithread/alloc/bitops.h [new file with mode: 0644]
ccan/antithread/alloc/test/run-corrupt.c [new file with mode: 0644]
ccan/antithread/alloc/test/run-testsize.c [new file with mode: 0644]
ccan/antithread/alloc/test/run-tiny-encode.c [new file with mode: 0644]
ccan/antithread/alloc/test/run.c [new file with mode: 0644]
ccan/antithread/alloc/tiny.c [new file with mode: 0644]
ccan/antithread/alloc/tiny.h [new file with mode: 0644]
ccan/antithread/antithread.c

index 2cccc4531bd48ddfddc6b9b903237d49f3122c10..061c08d017f0604b154b091af03c37d88ab70e23 100644 (file)
@@ -4,7 +4,6 @@ TAGS
 *.o
 libccan.a
 config.h
-ccan/*-Makefile
 *~
 tools/ccan_depends
 tools/doc_extract
index b35531567d5751320f52e017466c7c301656353d..1e08982704956b7ba0465c4dd6062f7071534048 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -55,6 +55,10 @@ summary-check-%: tools/ccanlint/ccanlint $(OBJFILES)
 summary-fastcheck-%: tools/ccanlint/ccanlint $(OBJFILES)
        tools/ccanlint/ccanlint -x tests_pass_valgrind -x tests_compile_coverage -s ccan/$*
 
+# FIXME: Horrible hacks because % doesn't match /
+summary-fastcheck-antithread/%: tools/ccanlint/ccanlint $(OBJFILES)
+       tools/ccanlint/ccanlint -x tests_pass_valgrind -x tests_compile_coverage -s ccan/antithread/$*
+
 ccan/%/info: ccan/%/_info
        $(CC) $(CCAN_CFLAGS) -o $@ -x c $<
 
index 39e5c69579a767e4fcf1b99a21e166d30df44456..6641e53e6bf6c8228840a5a81a2014ca6cefb795 100644 (file)
@@ -25,8 +25,8 @@ MODS_NORMAL_NO_SRC := alignof \
        typesafe_cb
 
 # No external dependencies, with C code:
-MODS_NORMAL_WITH_SRC := alloc \
-       antithread \
+MODS_NORMAL_WITH_SRC := antithread \
+       antithread/alloc \
        asort \
        asprintf \
        autodata \
diff --git a/ccan/.gitignore b/ccan/.gitignore
new file mode 100644 (file)
index 0000000..714e66a
--- /dev/null
@@ -0,0 +1 @@
+*-Makefile
diff --git a/ccan/alloc/LICENSE b/ccan/alloc/LICENSE
deleted file mode 120000 (symlink)
index dc314ec..0000000
+++ /dev/null
@@ -1 +0,0 @@
-../../licenses/LGPL-2.1
\ No newline at end of file
diff --git a/ccan/alloc/_info b/ccan/alloc/_info
deleted file mode 100644 (file)
index e4a47ba..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-#include <stdio.h>
-#include <string.h>
-#include "config.h"
-
-/**
- * alloc - memory allocator routines
- *
- * The alloc module implements a simple allocator which you can use to
- * dynamically allocate space within a region of memory.  This can be useful
- * for suballocations within a given region, or a memory-mapped file.
- *
- * All metadata is kept within the memory handed to the allocator: you only
- * need hand the pointer and the size of the memory to each call.
- *
- * The region contents is always in offsets, so it can be mapped in different
- * places, but is not endian-safe.
- *
- * Example:
- *     #include <sys/mman.h>
- *     #include <unistd.h>
- *     #include <sys/types.h>
- *     #include <err.h>
- *     #include <sys/stat.h>
- *     #include <fcntl.h>
- *     #include <string.h>
- *     #include <stdlib.h>
- *     #include <ccan/alloc/alloc.h>
- *
- *     static void usage(const char *name)
- *     {
- *             errx(1, "Usage: %s --create <mapfile>\n"
- *                  " %s --check <mapfile>\n"
- *                  " %s --alloc <mapfile>\n"
- *                  " %s --free=<offset> <mapfile>\n", name, name, name, name);
- *     }
- *
- *     // Create a memory mapped file, and allocate from within it
- *     int main(int argc, char *argv[])
- *     {
- *             void *a, *p;
- *             int fd;
- *             enum { CREATE, CHECK, ALLOC, FREE } cmd;
- *
- *             if (argc != 3)
- *                     usage(argv[0]);
- *
- *             if (strcmp(argv[1], "--create") == 0)
- *                     cmd = CREATE;
- *             else if (strcmp(argv[1], "--check") == 0)
- *                     cmd = CHECK;
- *             else if (strcmp(argv[1], "--alloc") == 0)
- *                     cmd = ALLOC;
- *             else if (strncmp(argv[1], "--free=", strlen("--free=")) == 0)
- *                     cmd = FREE;
- *             else
- *                     usage(argv[0]);
- *
- *             if (cmd == CREATE) {
- *                     fd = open(argv[2], O_RDWR|O_CREAT|O_EXCL, 0600);
- *                     if (fd < 0)
- *                             err(1, "Could not create %s", argv[2]);
- *                     if (ftruncate(fd, 1048576) != 0)
- *                             err(1, "Could not set length on %s", argv[2]);
- *             } else {
- *                     fd = open(argv[2], O_RDWR);
- *                     if (fd < 0)
- *                             err(1, "Could not open %s", argv[2]);
- *             }
- *
- *             a = mmap(NULL, 1048576, PROT_READ|PROT_WRITE, MAP_SHARED, fd,0);
- *             if (a == MAP_FAILED)
- *                     err(1, "Could not map %s", argv[2]);
- *
- *             switch (cmd) {
- *             case CREATE:
- *                     alloc_init(a, 1048576);
- *                     break;
- *             case CHECK:
- *                     if (!alloc_check(a, 1048576))
- *                             err(1, "Region is corrupt");
- *                     break;
- *             case ALLOC:
- *                     p = alloc_get(a, 1048576, 1024, 16);
- *                     if (!p)
- *                             errx(1, "Could not allocate");
- *                     printf("%zu\n", (char *)p - (char *)a);
- *                     break;
- *             case FREE:
- *                     p = (char *)a + atol(argv[1] + strlen("--free="));
- *                     alloc_free(a, 1048576, p);
- *                     break;
- *             }
- *             return 0;
- *     }
- *
- * License: LGPL (v2.1 or any later version)
- * Author: Rusty Russell <rusty@rustcorp.com.au>
- */
-int main(int argc, char *argv[])
-{
-       if (argc != 2)
-               return 1;
-
-       if (strcmp(argv[1], "depends") == 0) {
-               printf("ccan/alignof\n");
-               printf("ccan/build_assert\n");
-               printf("ccan/compiler\n");
-               printf("ccan/ilog\n");
-               printf("ccan/likely\n");
-               printf("ccan/short_types\n");
-               return 0;
-       }
-
-       return 1;
-}
diff --git a/ccan/alloc/alloc.c b/ccan/alloc/alloc.c
deleted file mode 100644 (file)
index 47a5970..0000000
+++ /dev/null
@@ -1,1239 +0,0 @@
-/* Licensed under LGPLv2.1+ - see LICENSE file for details */
-#include <unistd.h>
-#include <stdint.h>
-#include <string.h>
-#include <limits.h>
-#include <assert.h>
-#include <stdlib.h>
-#include "alloc.h"
-#include "bitops.h"
-#include "tiny.h"
-#include <ccan/build_assert/build_assert.h>
-#include <ccan/likely/likely.h>
-#include <ccan/alignof/alignof.h>
-#include <ccan/short_types/short_types.h>
-#include <ccan/compiler/compiler.h>
-#include "config.h"
-
-/*
-   Inspired by (and parts taken from) Andrew Tridgell's alloc_mmap:
-   http://samba.org/~tridge/junkcode/alloc_mmap/
-
-   Copyright (C) Andrew Tridgell 2007
-   
-   This library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2 of the License, or (at your option) any later version.
-
-   This library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with this library; if not, write to the Free Software
-   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-/* We divide the pool into this many large pages (nearest power of 2) */
-#define MAX_LARGE_PAGES (256UL)
-
-/* 32 small pages == 1 large page. */
-#define BITS_FROM_SMALL_TO_LARGE_PAGE 5
-
-#define MAX_SMALL_PAGES (MAX_LARGE_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)
-
-/* Smallest pool size for this scheme: 128-byte small pages.  That's
- * 9/13% overhead for 32/64 bit. */
-#define MIN_USEFUL_SIZE (MAX_SMALL_PAGES * 128)
-
-/* Every 4 buckets, we jump up a power of 2. ...8 10 12 14 16 20 24 28 32... */
-#define INTER_BUCKET_SPACE 4
-
-#define SMALL_PAGES_PER_LARGE_PAGE (1 << BITS_FROM_SMALL_TO_LARGE_PAGE)
-
-/* FIXME: Figure this out properly. */
-#define MAX_SIZE (1 << 30)
-
-/* How few object to fit in a page before using a larger one? (8) */
-#define MAX_PAGE_OBJECT_ORDER  3
-
-#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
-
-struct bucket_state {
-       u32 elements_per_page;
-       u16 page_list;
-       u16 full_list;
-};
-
-struct header {
-       /* Bitmap of which pages are large. */
-       unsigned long pagesize[MAX_LARGE_PAGES / BITS_PER_LONG];
-
-       /* List of unused small/large pages. */
-       u16 small_free_list;
-       u16 large_free_list;
-
-       /* List of huge allocs. */
-       unsigned long huge;
-
-       /* This is less defined: we have two buckets for each power of 2 */
-       struct bucket_state bs[1];
-};
-
-struct huge_alloc {
-       unsigned long next, prev;
-       unsigned long off, len;
-};
-
-struct page_header {
-       u16 next, prev;
-       /* FIXME: We can just count all-0 and all-1 used[] elements. */
-       unsigned elements_used : 25;
-       unsigned bucket : 7;
-       unsigned long used[1]; /* One bit per element. */
-};
-
-/*
- * Every 4 buckets, the size doubles.
- * Between buckets, sizes increase linearly.
- *
- * eg. bucket 40 = 2^10                        = 1024
- *     bucket 41 = 2^10 + 2^10*4       = 1024 + 256
- *     bucket 42 = 2^10 + 2^10*4       = 1024 + 512
- *     bucket 43 = 2^10 + 2^10*4       = 1024 + 768
- *     bucket 45 = 2^11                        = 2048
- *
- * Care is taken to handle low numbered buckets, at cost of overflow.
- */
-static unsigned long bucket_to_size(unsigned int bucket)
-{
-       unsigned long base = 1UL << (bucket / INTER_BUCKET_SPACE);
-       return base + ((bucket % INTER_BUCKET_SPACE)
-                      << (bucket / INTER_BUCKET_SPACE))
-               / INTER_BUCKET_SPACE;
-}
-
-/*
- * Say size is 10.
- *   fls(size/2) == 3.  1 << 3 == 8, so we're 2 too large, out of a possible
- * 8 too large.  That's 1/4 of the way to the next power of 2 == 1 bucket.
- *
- * We make sure we round up.  Note that this fails on 32 bit at size
- * 1879048193 (around bucket 120).
- */
-static unsigned int size_to_bucket(unsigned long size)
-{
-       unsigned int base = afls(size/2);
-       unsigned long overshoot;
-
-       overshoot = size - (1UL << base);
-       return base * INTER_BUCKET_SPACE
-               + ((overshoot * INTER_BUCKET_SPACE + (1UL << base)-1) >> base);
-}
-
-static unsigned int small_page_bits(unsigned long poolsize)
-{
-       return afls(poolsize / MAX_SMALL_PAGES - 1);
-}
-
-static struct page_header *from_pgnum(struct header *head,
-                                     unsigned long pgnum,
-                                     unsigned sp_bits)
-{
-       return (struct page_header *)((char *)head + (pgnum << sp_bits));
-}
-
-static u16 to_pgnum(struct header *head, void *p, unsigned sp_bits)
-{
-       return ((char *)p - (char *)head) >> sp_bits;
-}
-
-static size_t used_size(unsigned int num_elements)
-{
-       return align_up(num_elements, BITS_PER_LONG) / CHAR_BIT;
-}
-
-/*
- * We always align the first entry to the lower power of 2.
- * eg. the 12-byte bucket gets 8-byte aligned.  The 4096-byte bucket
- * gets 4096-byte aligned.
- */
-static unsigned long page_header_size(unsigned int align_bits,
-                                     unsigned long num_elements)
-{
-       unsigned long size;
-
-       size = sizeof(struct page_header)
-               - sizeof(((struct page_header *)0)->used)
-               + used_size(num_elements);
-       return align_up(size, 1UL << align_bits);
-}
-
-static void add_to_list(struct header *head,
-                       u16 *list, struct page_header *ph, unsigned sp_bits)
-{
-       unsigned long h = *list, offset = to_pgnum(head, ph, sp_bits);
-
-       ph->next = h;
-       if (h) {
-               struct page_header *prev = from_pgnum(head, h, sp_bits);
-               assert(prev->prev == 0);
-               prev->prev = offset;
-       }
-       *list = offset;
-       ph->prev = 0;
-}
-
-static void del_from_list(struct header *head,
-                         u16 *list, struct page_header *ph, unsigned sp_bits)
-{
-       /* Front of list? */
-       if (ph->prev == 0) {
-               *list = ph->next;
-       } else {
-               struct page_header *prev = from_pgnum(head, ph->prev, sp_bits);
-               prev->next = ph->next;
-       }
-       if (ph->next != 0) {
-               struct page_header *next = from_pgnum(head, ph->next, sp_bits);
-               next->prev = ph->prev;
-       }
-}
-
-static u16 pop_from_list(struct header *head,
-                                  u16 *list,
-                                  unsigned int sp_bits)
-{
-       u16 h = *list;
-       struct page_header *ph = from_pgnum(head, h, sp_bits);
-
-       if (likely(h)) {
-               *list = ph->next;
-               if (*list)
-                       from_pgnum(head, *list, sp_bits)->prev = 0;
-       }
-       return h;
-}
-
-static void add_to_huge_list(struct header *head, struct huge_alloc *ha)
-{
-       unsigned long h = head->huge;
-       unsigned long offset = (char *)ha - (char *)head;
-
-       ha->next = h;
-       if (h) {
-               struct huge_alloc *prev = (void *)((char *)head + h);
-               assert(prev->prev == 0);
-               prev->prev = offset;
-       }
-       head->huge = offset;
-       ha->prev = 0;
-}
-
-static void del_from_huge(struct header *head, struct huge_alloc *ha)
-{
-       /* Front of list? */
-       if (ha->prev == 0) {
-               head->huge = ha->next;
-       } else {
-               struct huge_alloc *prev = (void *)((char *)head + ha->prev);
-               prev->next = ha->next;
-       }
-       if (ha->next != 0) {
-               struct huge_alloc *next = (void *)((char *)head + ha->next);
-               next->prev = ha->prev;
-       }
-}
-
-static void add_small_page_to_freelist(struct header *head,
-                                      struct page_header *ph,
-                                      unsigned int sp_bits)
-{
-       add_to_list(head, &head->small_free_list, ph, sp_bits);
-}
-
-static void add_large_page_to_freelist(struct header *head,
-                                      struct page_header *ph,
-                                      unsigned int sp_bits)
-{
-       add_to_list(head, &head->large_free_list, ph, sp_bits);
-}
-
-static void add_to_bucket_list(struct header *head,
-                              struct bucket_state *bs,
-                              struct page_header *ph,
-                              unsigned int sp_bits)
-{
-       add_to_list(head, &bs->page_list, ph, sp_bits);
-}
-
-static void del_from_bucket_list(struct header *head,
-                                struct bucket_state *bs,
-                                struct page_header *ph,
-                                unsigned int sp_bits)
-{
-       del_from_list(head, &bs->page_list, ph, sp_bits);
-}
-
-static void del_from_bucket_full_list(struct header *head,
-                                     struct bucket_state *bs,
-                                     struct page_header *ph,
-                                     unsigned int sp_bits)
-{
-       del_from_list(head, &bs->full_list, ph, sp_bits);
-}
-
-static void add_to_bucket_full_list(struct header *head,
-                                   struct bucket_state *bs,
-                                   struct page_header *ph,
-                                   unsigned int sp_bits)
-{
-       add_to_list(head, &bs->full_list, ph, sp_bits);
-}
-
-static void clear_bit(unsigned long bitmap[], unsigned int off)
-{
-       bitmap[off / BITS_PER_LONG] &= ~(1UL << (off % BITS_PER_LONG));
-}
-
-static bool test_bit(const unsigned long bitmap[], unsigned int off)
-{
-       return bitmap[off / BITS_PER_LONG] & (1UL << (off % BITS_PER_LONG));
-}
-
-static void set_bit(unsigned long bitmap[], unsigned int off)
-{
-       bitmap[off / BITS_PER_LONG] |= (1UL << (off % BITS_PER_LONG));
-}
-
-/* There must be a bit to be found. */
-static unsigned int find_free_bit(const unsigned long bitmap[])
-{
-       unsigned int i;
-
-       for (i = 0; bitmap[i] == -1UL; i++);
-       return (i*BITS_PER_LONG) + affsl(~bitmap[i]) - 1;
-}
-
-/* How many elements can we fit in a page? */
-static unsigned long elements_per_page(unsigned long align_bits,
-                                      unsigned long esize,
-                                      unsigned long psize)
-{
-       unsigned long num, overhead;
-
-       /* First approximation: no extra room for bitmap. */
-       overhead = align_up(sizeof(struct page_header), 1UL << align_bits);
-       num = (psize - overhead) / esize;
-
-       while (page_header_size(align_bits, num) + esize * num > psize)
-               num--;
-       return num;
-}
-
-static bool large_page_bucket(unsigned int bucket, unsigned int sp_bits)
-{
-       unsigned long max_smallsize;
-
-       /* Note: this doesn't take into account page header. */
-       max_smallsize = (1UL << sp_bits) >> MAX_PAGE_OBJECT_ORDER;
-
-       return bucket_to_size(bucket) > max_smallsize;
-}
-
-static unsigned int max_bucket(unsigned int lp_bits)
-{
-       return (lp_bits - MAX_PAGE_OBJECT_ORDER) * INTER_BUCKET_SPACE;
-}
-
-void alloc_init(void *pool, unsigned long poolsize)
-{
-       struct header *head = pool;
-       struct page_header *ph;
-       unsigned int lp_bits, sp_bits, num_buckets;
-       unsigned long header_size, i;
-
-       if (poolsize < MIN_USEFUL_SIZE) {
-               tiny_alloc_init(pool, poolsize);
-               return;
-       }
-
-       /* We rely on page numbers fitting in 16 bit. */
-       BUILD_ASSERT(MAX_SMALL_PAGES < 65536);
-       
-       sp_bits = small_page_bits(poolsize);
-       lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
-
-       num_buckets = max_bucket(lp_bits);
-
-       head = pool;
-       header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);
-
-       memset(head, 0, header_size);
-       for (i = 0; i < num_buckets; i++) {
-               unsigned long pagesize;
-
-               if (large_page_bucket(i, sp_bits))
-                       pagesize = 1UL << lp_bits;
-               else
-                       pagesize = 1UL << sp_bits;
-
-               head->bs[i].elements_per_page
-                       = elements_per_page(i / INTER_BUCKET_SPACE,
-                                           bucket_to_size(i),
-                                           pagesize);
-       }
-
-       /* They start as all large pages. */
-       memset(head->pagesize, 0xFF, sizeof(head->pagesize));
-       /* FIXME: small pages for last bit? */
-
-       /* Split first page into small pages. */
-       assert(header_size < (1UL << lp_bits));
-       clear_bit(head->pagesize, 0);
-
-       /* Skip over page(s) used by header, add rest to free list */
-       for (i = align_up(header_size, (1UL << sp_bits)) >> sp_bits;
-            i < SMALL_PAGES_PER_LARGE_PAGE;
-            i++) {
-               ph = from_pgnum(head, i, sp_bits);
-               ph->elements_used = 0;
-               add_small_page_to_freelist(head, ph, sp_bits);
-       }
-
-       /* Add the rest of the pages as large pages. */
-       i = SMALL_PAGES_PER_LARGE_PAGE;
-       while ((i << sp_bits) + (1UL << lp_bits) <= poolsize) {
-               assert(i < MAX_SMALL_PAGES);
-               ph = from_pgnum(head, i, sp_bits);
-               ph->elements_used = 0;
-               add_large_page_to_freelist(head, ph, sp_bits);
-               i += SMALL_PAGES_PER_LARGE_PAGE;
-       }
-}
-
-/* A large page worth of small pages are free: delete them from free list. */
-static void del_large_from_small_free_list(struct header *head,
-                                          struct page_header *ph,
-                                          unsigned int sp_bits)
-{
-       unsigned long i;
-
-       for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
-               del_from_list(head, &head->small_free_list,
-                             (struct page_header *)((char *)ph
-                                                    + (i << sp_bits)),
-                             sp_bits);
-       }
-}
-
-static bool all_empty(struct header *head,
-                     unsigned long pgnum,
-                     unsigned sp_bits)
-{
-       unsigned long i;
-
-       for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
-               struct page_header *ph = from_pgnum(head, pgnum + i, sp_bits);
-               if (ph->elements_used)
-                       return false;
-       }
-       return true;
-}
-
-static void recombine_small_pages(struct header *head, unsigned long poolsize,
-                                 unsigned int sp_bits)
-{
-       unsigned long i;
-       unsigned int lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
-
-       /* Look for small pages to coalesce, after first large page. */
-       for (i = SMALL_PAGES_PER_LARGE_PAGE;
-            i < (poolsize >> lp_bits) << BITS_FROM_SMALL_TO_LARGE_PAGE;
-            i += SMALL_PAGES_PER_LARGE_PAGE) {
-               /* Already a large page? */
-               if (test_bit(head->pagesize, i / SMALL_PAGES_PER_LARGE_PAGE))
-                       continue;
-               if (all_empty(head, i, sp_bits)) {
-                       struct page_header *ph = from_pgnum(head, i, sp_bits);
-                       set_bit(head->pagesize,
-                               i / SMALL_PAGES_PER_LARGE_PAGE);
-                       del_large_from_small_free_list(head, ph, sp_bits);
-                       add_large_page_to_freelist(head, ph, sp_bits);
-               }
-       }
-}
-
-static u16 get_large_page(struct header *head, unsigned long poolsize,
-                         unsigned int sp_bits)
-{
-       unsigned int page;
-
-       page = pop_from_list(head, &head->large_free_list, sp_bits);
-       if (likely(page))
-               return page;
-
-       recombine_small_pages(head, poolsize, sp_bits);
-
-       return pop_from_list(head, &head->large_free_list, sp_bits);
-}
-
-/* Returns small page. */
-static unsigned long break_up_large_page(struct header *head,
-                                        unsigned int sp_bits,
-                                        u16 lpage)
-{
-       unsigned int i;
-
-       clear_bit(head->pagesize, lpage >> BITS_FROM_SMALL_TO_LARGE_PAGE);
-
-       for (i = 1; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
-               struct page_header *ph = from_pgnum(head, lpage + i, sp_bits);
-               /* Initialize this: huge_alloc reads it. */
-               ph->elements_used = 0;
-               add_small_page_to_freelist(head, ph, sp_bits);
-       }
-
-       return lpage;
-}
-
-static u16 get_small_page(struct header *head, unsigned long poolsize,
-                         unsigned int sp_bits)
-{
-       u16 ret;
-
-       ret = pop_from_list(head, &head->small_free_list, sp_bits);
-       if (likely(ret))
-               return ret;
-       ret = get_large_page(head, poolsize, sp_bits);
-       if (likely(ret))
-               ret = break_up_large_page(head, sp_bits, ret);
-       return ret;
-}
-
-static bool huge_allocated(struct header *head, unsigned long offset)
-{
-       unsigned long i;
-       struct huge_alloc *ha;
-
-       for (i = head->huge; i; i = ha->next) {
-               ha = (void *)((char *)head + i);
-               if (ha->off <= offset && ha->off + ha->len > offset)
-                       return true;
-       }
-       return false;
-}
-
-/* They want something really big.  Aim for contiguous pages (slow). */
-static COLD void *huge_alloc(void *pool, unsigned long poolsize,
-                            unsigned long size, unsigned long align)
-{
-       struct header *head = pool;
-       struct huge_alloc *ha;
-       unsigned long i, sp_bits, lp_bits, num, header_size;
-
-       sp_bits = small_page_bits(poolsize);
-       lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
-
-       /* Allocate tracking structure optimistically. */
-       ha = alloc_get(pool, poolsize, sizeof(*ha), ALIGNOF(*ha));
-       if (!ha)
-               return NULL;
-
-       /* First search for contiguous small pages... */
-       header_size = sizeof(*head) + sizeof(head->bs) * (max_bucket(lp_bits)-1);
-
-       num = 0;
-       for (i = (header_size + (1UL << sp_bits) - 1) >> sp_bits;
-            i << sp_bits < poolsize;
-            i++) {
-               struct page_header *pg;
-               unsigned long off = (i << sp_bits);
-
-               /* Skip over large pages. */
-               if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
-                       i += (1UL << BITS_FROM_SMALL_TO_LARGE_PAGE)-1;
-                       continue;
-               }
-
-               /* Does this page meet alignment requirements? */
-               if (!num && off % align != 0)
-                       continue;
-
-               /* FIXME: This makes us O(n^2). */
-               if (huge_allocated(head, off)) {
-                       num = 0;
-                       continue;
-               }
-
-               pg = (struct page_header *)((char *)head + off);
-               if (pg->elements_used) {
-                       num = 0;
-                       continue;
-               }
-
-               num++;
-               if (num << sp_bits >= size) {
-                       unsigned long pgnum;
-
-                       /* Remove from free list. */
-                       for (pgnum = i; pgnum > i - num; pgnum--) {
-                               pg = from_pgnum(head, pgnum, sp_bits);
-                               del_from_list(head,
-                                             &head->small_free_list,
-                                             pg, sp_bits);
-                       }
-                       ha->off = (i - num + 1) << sp_bits;
-                       ha->len = num << sp_bits;
-                       goto done;
-               }
-       }
-
-       /* Now search for large pages... */
-       recombine_small_pages(head, poolsize, sp_bits);
-
-       num = 0;
-       for (i = (header_size + (1UL << lp_bits) - 1) >> lp_bits;
-            (i << lp_bits) < poolsize; i++) {
-               struct page_header *pg;
-               unsigned long off = (i << lp_bits);
-
-               /* Ignore small pages. */
-               if (!test_bit(head->pagesize, i))
-                       continue;
-
-               /* Does this page meet alignment requirements? */
-               if (!num && off % align != 0)
-                       continue;
-
-               /* FIXME: This makes us O(n^2). */
-               if (huge_allocated(head, off)) {
-                       num = 0;
-                       continue;
-               }
-
-               pg = (struct page_header *)((char *)head + off);
-               if (pg->elements_used) {
-                       num = 0;
-                       continue;
-               }
-
-               num++;
-               if (num << lp_bits >= size) {
-                       unsigned long pgnum;
-
-                       /* Remove from free list. */
-                       for (pgnum = i; pgnum > i - num; pgnum--) {
-                               pg = from_pgnum(head, pgnum, lp_bits);
-                               del_from_list(head,
-                                             &head->large_free_list,
-                                             pg, sp_bits);
-                       }
-                       ha->off = (i - num + 1) << lp_bits;
-                       ha->len = num << lp_bits;
-                       goto done;
-               }
-       }
-
-       /* Unable to satisfy: free huge alloc structure. */
-       alloc_free(pool, poolsize, ha);
-       return NULL;
-
-done:
-       add_to_huge_list(pool, ha);
-       return (char *)pool + ha->off;
-}
-
-static COLD void
-huge_free(struct header *head, unsigned long poolsize, void *free)
-{
-       unsigned long i, off, pgnum, free_off = (char *)free - (char *)head;
-       unsigned int sp_bits, lp_bits;
-       struct huge_alloc *ha;
-
-       for (i = head->huge; i; i = ha->next) {
-               ha = (void *)((char *)head + i);
-               if (free_off == ha->off)
-                       break;
-       }
-       assert(i);
-
-       /* Free up all the pages, delete and free ha */
-       sp_bits = small_page_bits(poolsize);
-       lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
-       pgnum = free_off >> sp_bits;
-
-       if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
-               for (off = ha->off;
-                    off < ha->off + ha->len;
-                    off += 1UL << lp_bits) {
-                       add_large_page_to_freelist(head,
-                                                  (void *)((char *)head + off),
-                                                  sp_bits);
-               }
-       } else {
-               for (off = ha->off;
-                    off < ha->off + ha->len;
-                    off += 1UL << sp_bits) {
-                       add_small_page_to_freelist(head,
-                                                  (void *)((char *)head + off),
-                                                  sp_bits);
-               }
-       }
-       del_from_huge(head, ha);
-       alloc_free(head, poolsize, ha);
-}
-
-static COLD unsigned long huge_size(struct header *head, void *p)
-{
-       unsigned long i, off = (char *)p - (char *)head;
-       struct huge_alloc *ha;
-
-       for (i = head->huge; i; i = ha->next) {
-               ha = (void *)((char *)head + i);
-               if (off == ha->off) {
-                       return ha->len;
-               }
-       }
-       abort();
-}
-
-void *alloc_get(void *pool, unsigned long poolsize,
-               unsigned long size, unsigned long align)
-{
-       struct header *head = pool;
-       unsigned int bucket;
-       unsigned long i;
-       struct bucket_state *bs;
-       struct page_header *ph;
-       unsigned int sp_bits;
-
-       if (poolsize < MIN_USEFUL_SIZE) {
-               return tiny_alloc_get(pool, poolsize, size, align);
-       }
-
-       size = align_up(size, align);
-       if (unlikely(!size))
-               size = 1;
-       bucket = size_to_bucket(size);
-
-       sp_bits = small_page_bits(poolsize);
-
-       if (bucket >= max_bucket(sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE)) {
-               return huge_alloc(pool, poolsize, size, align);
-       }
-
-       bs = &head->bs[bucket];
-
-       if (!bs->page_list) {
-               struct page_header *ph;
-
-               if (large_page_bucket(bucket, sp_bits))
-                       bs->page_list = get_large_page(head, poolsize,
-                                                      sp_bits);
-               else
-                       bs->page_list = get_small_page(head, poolsize,
-                                                      sp_bits);
-               /* FIXME: Try large-aligned alloc?  Header stuffing? */
-               if (unlikely(!bs->page_list))
-                       return NULL;
-               ph = from_pgnum(head, bs->page_list, sp_bits);
-               ph->bucket = bucket;
-               ph->elements_used = 0;
-               ph->next = 0;
-               memset(ph->used, 0, used_size(bs->elements_per_page));
-       }
-
-       ph = from_pgnum(head, bs->page_list, sp_bits);
-
-       i = find_free_bit(ph->used);
-       set_bit(ph->used, i);
-       ph->elements_used++;
-
-       /* check if this page is now full */
-       if (unlikely(ph->elements_used == bs->elements_per_page)) {
-               del_from_bucket_list(head, bs, ph, sp_bits);
-               add_to_bucket_full_list(head, bs, ph, sp_bits);
-       }
-
-       return (char *)ph + page_header_size(ph->bucket / INTER_BUCKET_SPACE,
-                                            bs->elements_per_page)
-              + i * bucket_to_size(bucket);
-}
-
/* "free" within an initialized pool: clears the element's bit in its
 * page and moves the page between the full/partial/free lists as its
 * occupancy changes.  @free must come from a successful alloc_get(). */
void alloc_free(void *pool, unsigned long poolsize, void *free)
{
	struct header *head = pool;
	struct bucket_state *bs;
	unsigned int sp_bits;
	unsigned long i, pgnum, pgoffset, offset = (char *)free - (char *)pool;
	bool smallpage;
	struct page_header *ph;

	/* Sub-minimum pools are managed by the tiny allocator. */
	if (poolsize < MIN_USEFUL_SIZE) {
		tiny_alloc_free(pool, poolsize, free);
		return;
	}
	
	/* Get page header. */
	sp_bits = small_page_bits(poolsize);
	pgnum = offset >> sp_bits;

	/* Big page? Round down further. */
	if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
		smallpage = false;
		pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);
	} else
		smallpage = true;

	/* Step back to page header.  A pointer at the very start of a page
	 * cannot be a bucket element (those follow the page header), so it
	 * must be a huge allocation. */
	ph = from_pgnum(head, pgnum, sp_bits);
	if ((void *)ph == free) {
		huge_free(head, poolsize, free);
		return;
	}

	bs = &head->bs[ph->bucket];
	pgoffset = offset - (pgnum << sp_bits)
		- page_header_size(ph->bucket / INTER_BUCKET_SPACE,
				   bs->elements_per_page);

	/* Page was full: it becomes partially-used again. */
	if (unlikely(ph->elements_used == bs->elements_per_page)) {
		del_from_bucket_full_list(head, bs, ph, sp_bits);
		add_to_bucket_list(head, bs, ph, sp_bits);
	}

	/* Which element are we? */
	i = pgoffset / bucket_to_size(ph->bucket);
	clear_bit(ph->used, i);
	ph->elements_used--;

	/* Last element freed: return the whole page to its free list. */
	if (unlikely(ph->elements_used == 0)) {
		bs = &head->bs[ph->bucket];
		del_from_bucket_list(head, bs, ph, sp_bits);
		if (smallpage)
			add_small_page_to_freelist(head, ph, sp_bits);
		else
			add_large_page_to_freelist(head, ph, sp_bits);
	}
}
-
-unsigned long alloc_size(void *pool, unsigned long poolsize, void *p)
-{
-       struct header *head = pool;
-       unsigned int pgnum, sp_bits;
-       unsigned long offset = (char *)p - (char *)pool;
-       struct page_header *ph;
-
-       if (poolsize < MIN_USEFUL_SIZE)
-               return tiny_alloc_size(pool, poolsize, p);
-
-       /* Get page header. */
-       sp_bits = small_page_bits(poolsize);
-       pgnum = offset >> sp_bits;
-
-       /* Big page? Round down further. */
-       if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE))
-               pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);
-
-       /* Step back to page header. */
-       ph = from_pgnum(head, pgnum, sp_bits);
-       if ((void *)ph == p)
-               return huge_size(head, p);
-
-       return bucket_to_size(ph->bucket);
-}
-
/* Single point of failure for all consistency checks: set a gdb
 * breakpoint here to see exactly which check tripped. */
static bool check_fail(void)
{
	return false;
}
-
-static unsigned long count_bits(const unsigned long bitmap[],
-                               unsigned long limit)
-{
-       unsigned long i, count = 0;
-
-       while (limit >= BITS_PER_LONG) {
-               count += popcount(bitmap[0]);
-               bitmap++;
-               limit -= BITS_PER_LONG;
-       }
-
-       for (i = 0; i < limit; i++)
-               if (test_bit(bitmap, i))
-                       count++;
-       return count;
-}
-
/* Does page number @pgnum (of @pagesize bytes, with small-page shift
 * @sp_bits) lie fully within a pool of @poolsize bytes? */
static bool out_of_bounds(unsigned long pgnum,
			  unsigned int sp_bits,
			  unsigned long pagesize,
			  unsigned long poolsize)
{
	unsigned long start = pgnum << sp_bits;

	/* Shift lost bits: pgnum can't be a real page number. */
	if ((start >> sp_bits) != pgnum)
		return true;

	/* Page must begin and end within the pool. */
	return start > poolsize || start + pagesize > poolsize;
}
-
-static bool check_bucket(struct header *head,
-                        unsigned long poolsize,
-                        unsigned long pages[],
-                        struct bucket_state *bs,
-                        unsigned int bindex)
-{
-       bool lp_bucket;
-       struct page_header *ph;
-       unsigned long taken, i, prev, pagesize, sp_bits, lp_bits;
-
-       sp_bits = small_page_bits(poolsize);
-       lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
-
-       lp_bucket = large_page_bucket(bindex, sp_bits);
-
-       pagesize = 1UL << (lp_bucket ? lp_bits : sp_bits);
-
-       /* This many elements fit? */
-       taken = page_header_size(bindex / INTER_BUCKET_SPACE,
-                                bs->elements_per_page);
-       taken += bucket_to_size(bindex) * bs->elements_per_page;
-       if (taken > pagesize)
-               return check_fail();
-
-       /* One more wouldn't fit? */
-       taken = page_header_size(bindex / INTER_BUCKET_SPACE,
-                                bs->elements_per_page + 1);
-       taken += bucket_to_size(bindex) * (bs->elements_per_page + 1);
-       if (taken <= pagesize)
-               return check_fail();
-
-       /* Walk used list. */
-       prev = 0;
-       for (i = bs->page_list; i; i = ph->next) {
-               /* Bad pointer? */
-               if (out_of_bounds(i, sp_bits, pagesize, poolsize))
-                       return check_fail();
-               /* Wrong size page? */
-               if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
-                   != lp_bucket)
-                       return check_fail();
-               /* Large page not on boundary? */
-               if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
-                       return check_fail();
-               ph = from_pgnum(head, i, sp_bits);
-               /* Linked list corrupt? */
-               if (ph->prev != prev)
-                       return check_fail();
-               /* Already seen this page? */
-               if (test_bit(pages, i))
-                       return check_fail();
-               set_bit(pages, i);
-               /* Empty or full? */
-               if (ph->elements_used == 0)
-                       return check_fail();
-               if (ph->elements_used >= bs->elements_per_page)
-                       return check_fail();
-               /* Used bits don't agree? */
-               if (ph->elements_used != count_bits(ph->used,
-                                                   bs->elements_per_page))
-                       return check_fail();
-               /* Wrong bucket? */
-               if (ph->bucket != bindex)
-                       return check_fail();
-               prev = i;
-       }
-
-       /* Walk full list. */
-       prev = 0;
-       for (i = bs->full_list; i; i = ph->next) {
-               /* Bad pointer? */
-               if (out_of_bounds(i, sp_bits, pagesize, poolsize))
-                       return check_fail();
-               /* Wrong size page? */
-               if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
-                   != lp_bucket)
-               /* Large page not on boundary? */
-               if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
-                       return check_fail();
-               ph = from_pgnum(head, i, sp_bits);
-               /* Linked list corrupt? */
-               if (ph->prev != prev)
-                       return check_fail();
-               /* Already seen this page? */
-               if (test_bit(pages, i))
-                       return check_fail();
-               set_bit(pages, i);
-               /* Not full? */
-               if (ph->elements_used != bs->elements_per_page)
-                       return check_fail();
-               /* Used bits don't agree? */
-               if (ph->elements_used != count_bits(ph->used,
-                                                   bs->elements_per_page))
-                       return check_fail();
-               /* Wrong bucket? */
-               if (ph->bucket != bindex)
-                       return check_fail();
-               prev = i;
-       }
-       return true;
-}
-
/* Integrity check: walk both free lists, every bucket's page lists, and
 * the huge-alloc list, claiming each small page in a bitmap as we go;
 * finally verify every page in the pool was claimed exactly once.
 * Returns false (via check_fail(), a handy breakpoint) on any
 * inconsistency. */
bool alloc_check(void *pool, unsigned long poolsize)
{
	struct header *head = pool;
	unsigned long prev, i, lp_bits, sp_bits, header_size, num_buckets;
	struct page_header *ph;
	struct huge_alloc *ha;
	/* One bit per small page: set when some walk accounts for it. */
	unsigned long pages[MAX_SMALL_PAGES / BITS_PER_LONG] = { 0 };

	if (poolsize < MIN_USEFUL_SIZE)
		return tiny_alloc_check(pool, poolsize);

	sp_bits = small_page_bits(poolsize);
	lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

	num_buckets = max_bucket(lp_bits);

	header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

	/* First, set all bits taken by header. */
	for (i = 0; i < header_size; i += (1UL << sp_bits))
		set_bit(pages, i >> sp_bits);

	/* Check small page free list. */
	prev = 0;
	for (i = head->small_free_list; i; i = ph->next) {
		/* Bad pointer? */
		if (out_of_bounds(i, sp_bits, 1UL << sp_bits, poolsize))
			return check_fail();
		/* Large page? */
		if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
			return check_fail();
		ph = from_pgnum(head, i, sp_bits);
		/* Linked list corrupt? */
		if (ph->prev != prev)
			return check_fail();
		/* Already seen this page? */
		if (test_bit(pages, i))
			return check_fail();
		set_bit(pages, i);
		prev = i;
	}

	/* Check large page free list. */
	prev = 0;
	for (i = head->large_free_list; i; i = ph->next) {
		/* Bad pointer? */
		if (out_of_bounds(i, sp_bits, 1UL << lp_bits, poolsize))
			return check_fail();
		/* Not large page? */
		if (!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
			return check_fail();
		/* Not page boundary? */
		if ((i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
			return check_fail();
		ph = from_pgnum(head, i, sp_bits);
		/* Linked list corrupt? */
		if (ph->prev != prev)
			return check_fail();
		/* Already seen this page? */
		if (test_bit(pages, i))
			return check_fail();
		set_bit(pages, i);
		prev = i;
	}

	/* Check the buckets. */
	for (i = 0; i < max_bucket(lp_bits); i++) {
		struct bucket_state *bs = &head->bs[i];

		if (!check_bucket(head, poolsize, pages, bs, i))
			return false;
	}

	/* Check the huge alloc list. */
	prev = 0;
	for (i = head->huge; i; i = ha->next) {
		unsigned long pgbits, j;

		/* Bad pointer? */
		if (i >= poolsize || i + sizeof(*ha) > poolsize)
			return check_fail();
		ha = (void *)((char *)head + i);

		/* Check contents of ha. */
		if (ha->off > poolsize || ha->off + ha->len > poolsize)
			return check_fail();

		/* Large or small page? */
		pgbits = test_bit(head->pagesize, ha->off >> lp_bits)
			? lp_bits : sp_bits;

		/* Not page boundary? */
		if ((ha->off % (1UL << pgbits)) != 0)
			return check_fail();

		/* Not page length? */
		if ((ha->len % (1UL << pgbits)) != 0)
			return check_fail();

		/* Linked list corrupt? */
		if (ha->prev != prev)
			return check_fail();

		/* Claim every small page covered by this huge alloc. */
		for (j = ha->off; j < ha->off + ha->len; j += (1UL<<sp_bits)) {
			/* Already seen this page? */
			if (test_bit(pages, j >> sp_bits))
				return check_fail();
			set_bit(pages, j >> sp_bits);
		}

		prev = i;
	}
		
	/* Make sure every page accounted for. */
	for (i = 0; i < poolsize >> sp_bits; i++) {
		if (!test_bit(pages, i))
			return check_fail();
		if (test_bit(head->pagesize,
			     i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
			/* Large page, skip rest. */
			i += SMALL_PAGES_PER_LARGE_PAGE - 1;
		}
	}

	return true;
}
-
/* Report one overhead line (bytes and percentage of pool) to @out;
 * returns @bytes so callers can accumulate a running total. */
static unsigned long print_overhead(FILE *out, const char *desc,
				    unsigned long bytes,
				    unsigned long poolsize)
{
	double percent = 100.0 * bytes / poolsize;

	fprintf(out, "Overhead (%s): %lu bytes (%.3g%%)\n",
		desc, bytes, percent);
	return bytes;
}
-
-static unsigned long count_list(struct header *head,
-                               u16 pgnum,
-                               unsigned int sp_bits,
-                               unsigned long *total_elems)
-{
-       struct page_header *p;
-       unsigned long ret = 0;
-
-       while (pgnum) {
-               p = from_pgnum(head, pgnum, sp_bits);
-               if (total_elems)
-                       (*total_elems) += p->elements_used;
-               ret++;
-               pgnum = p->next;
-       }
-       return ret;
-}
-
/* Print usage statistics for one bucket (page counts, element counts,
 * per-page header/tail waste); returns the bucket's total overhead in
 * bytes, or 0 for an unused bucket. */
static unsigned long visualize_bucket(FILE *out, struct header *head,
				      unsigned int bucket,
				      unsigned long poolsize,
				      unsigned int sp_bits)
{
	unsigned long num_full, num_partial, num_pages, page_size,
		elems, hdr_min, hdr_size, elems_per_page, overhead = 0;

	elems_per_page = head->bs[bucket].elements_per_page;

	/* If we used byte-based bitmaps, we could get pg hdr to: */
	hdr_min = sizeof(struct page_header)
		- sizeof(((struct page_header *)0)->used)
		+ align_up(elems_per_page, CHAR_BIT) / CHAR_BIT;
	hdr_size = page_header_size(bucket / INTER_BUCKET_SPACE,
				    elems_per_page);

	/* Count pages and elements on both the full and partial lists. */
	elems = 0;
	num_full = count_list(head, head->bs[bucket].full_list, sp_bits,
			      &elems);
	num_partial = count_list(head, head->bs[bucket].page_list, sp_bits,
				 &elems);
	num_pages = num_full + num_partial;
	if (!num_pages)
		return 0;

	fprintf(out, "Bucket %u (%lu bytes):"
		" %lu full, %lu partial = %lu elements\n",
		bucket, bucket_to_size(bucket), num_full, num_partial, elems);
	/* Strict requirement of page header size. */
	overhead += print_overhead(out, "page headers",
				   hdr_min * num_pages, poolsize);
	/* Gap between minimal page header and actual start. */
	overhead += print_overhead(out, "page post-header alignments",
				   (hdr_size - hdr_min) * num_pages, poolsize);
	/* Between last element and end of page. */
	page_size = (1UL << sp_bits);
	if (large_page_bucket(bucket, sp_bits))
		page_size <<= BITS_FROM_SMALL_TO_LARGE_PAGE;

	overhead += print_overhead(out, "page tails",
				   (page_size - (hdr_size
						 + (elems_per_page
						    * bucket_to_size(bucket))))
				   * num_pages, poolsize);
	return overhead;
}
-
/* Dump a human-readable summary of pool usage to @out: page sizes,
 * header overhead, large/small page utilisation, then one line per
 * non-empty bucket.  Assumes the pool is consistent (run alloc_check()
 * first if in doubt). */
void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
{
	struct header *head = pool;
	unsigned long i, lp_bits, sp_bits, header_size, num_buckets, count,
		overhead = 0;

	fprintf(out, "Pool %p size %lu: (%s allocator)\n", pool, poolsize,
		poolsize < MIN_USEFUL_SIZE ? "tiny" : "standard");

	/* Tiny pools have their own visualizer. */
	if (poolsize < MIN_USEFUL_SIZE) {
		tiny_alloc_visualize(out, pool, poolsize);
		return;
	}
	
	sp_bits = small_page_bits(poolsize);
	lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

	num_buckets = max_bucket(lp_bits);
	header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);

	fprintf(out, "Large page size %lu, small page size %lu.\n",
		1UL << lp_bits, 1UL << sp_bits);
	overhead += print_overhead(out, "unused pool tail",
				   poolsize % (1UL << lp_bits), poolsize);
	fprintf(out, "Main header %lu bytes (%lu small pages).\n",
		header_size, align_up(header_size, 1UL << sp_bits) >> sp_bits);
	overhead += print_overhead(out, "partial header page",
				   align_up(header_size, 1UL << sp_bits)
				   - header_size, poolsize);
	/* Total large pages. */
	i = count_bits(head->pagesize, poolsize >> lp_bits);
	/* Used pages. */
	count = i - count_list(head, head->large_free_list, sp_bits, NULL);
	fprintf(out, "%lu/%lu large pages used (%.3g%%)\n",
		count, i, count ? 100.0 * count / i : 0.0);

	/* Total small pages. */
	i = ((poolsize >> lp_bits) - i) << BITS_FROM_SMALL_TO_LARGE_PAGE;
	/* Used pages */
	count = i - count_list(head, head->small_free_list, sp_bits, NULL);
	fprintf(out, "%lu/%lu small pages used (%.3g%%)\n",
		count, i, count ? 100.0 * count / i : 0.0);

	/* Summary of each bucket. */
	fprintf(out, "%lu buckets:\n", num_buckets);
	for (i = 0; i < num_buckets; i++)
		overhead += visualize_bucket(out, head, i, poolsize, sp_bits);

	print_overhead(out, "total", overhead, poolsize);
}
diff --git a/ccan/alloc/alloc.h b/ccan/alloc/alloc.h
deleted file mode 100644 (file)
index 8ffa169..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-/* Licensed under LGPLv2.1+ - see LICENSE file for details */
-#ifndef ALLOC_H
-#define ALLOC_H
-#include <stdio.h>
-#include <stdbool.h>
-
-/**
- * alloc_init - initialize a pool of memory for the allocator.
- * @pool: the contiguous bytes for the allocator to use
- * @poolsize: the size of the pool
- *
- * This stores all the setup state required to perform allocation within the
- * pool (there is no external state).  Any previous contents of @pool are
- * discarded.
- *
- * The same @pool and @poolsize arguments must be handed to the other alloc
- * functions after this.
- *
- * If the pool is too small for meaningful allocations, alloc_get will fail.
- *
- * Example:
- *     void *pool = malloc(32*1024*1024);
- *     if (!pool)
- *             err(1, "Failed to allocate 32MB");
- *     alloc_init(pool, 32*1024*1024);
- */
-void alloc_init(void *pool, unsigned long poolsize);
-
-/**
- * alloc_get - allocate some memory from the pool
- * @pool: the contiguous bytes for the allocator to use
- * @poolsize: the size of the pool
- * @size: the size of the desired allocation
- * @align: the alignment of the desired allocation (0 or power of 2)
- *
- * This is "malloc" within an initialized pool.
- *
- * It will return a unique pointer within the pool (ie. between @pool
- * and @pool+@poolsize) which meets the alignment requirements of
- * @align.  Note that the alignment is relative to the start of the pool,
- * so if @pool is not aligned, the pointer won't be either.
- *
- * Returns NULL if there is no contiguous room.
- *
- * Example:
- *     #include <ccan/alignof/alignof.h>
- *     ...
- *             double *d = alloc_get(pool, 32*1024*1024,
- *                                   sizeof(*d), ALIGNOF(*d));
- *             if (!d)
- *                     err(1, "Failed to allocate a double");
- */
-void *alloc_get(void *pool, unsigned long poolsize,
-               unsigned long size, unsigned long align);
-
-/**
- * alloc_free - free some allocated memory from the pool
- * @pool: the contiguous bytes for the allocator to use
- * @poolsize: the size of the pool
- * @p: the non-NULL pointer returned from alloc_get.
- *
- * This is "free" within an initialized pool.  A pointer should only be
- * freed once, and must be a pointer returned from a successful alloc_get()
- * call.
- *
- * Example:
- *     alloc_free(pool, 32*1024*1024, d);
- */
-void alloc_free(void *pool, unsigned long poolsize, void *free);
-
-/**
- * alloc_size - get the actual size allocated by alloc_get
- * @pool: the contiguous bytes for the allocator to use
- * @poolsize: the size of the pool
- * @p: the non-NULL pointer returned from alloc_get.
- *
- * alloc_get() may overallocate, in which case you may use the extra
- * space exactly as if you had asked for it.
- *
- * The return value will always be at least the @size passed to alloc_get().
- *
- * Example:
- *     printf("Allocating a double actually got me %lu bytes\n",
- *             alloc_size(pool, 32*1024*1024, d));
- */
-unsigned long alloc_size(void *pool, unsigned long poolsize, void *p);
-
-/**
- * alloc_check - check the integrity of the allocation pool
- * @pool: the contiguous bytes for the allocator to use
- * @poolsize: the size of the pool
- *
- * alloc_check() can be used for debugging suspected pool corruption.  It may
- * be quite slow, but provides some assistance for hard-to-find overruns or
- * double-frees.  Unlike the rest of the code, it will not crash on corrupted
- * pools.
- *
- * There is an internal function check_fail() which this calls on failure which
- * is useful for placing breakpoints and gaining more insight into the type
- * of the corruption detected.
- *
- * Example:
- *     #include <assert.h>
- *     ...
- *             assert(alloc_check(pool, 32*1024*1024));
- */
-bool alloc_check(void *pool, unsigned long poolsize);
-
-/**
- * alloc_visualize - dump information about the allocation pool
- * @pool: the contiguous bytes for the allocator to use
- * @poolsize: the size of the pool
- *
- * When debugging the allocator itself, it's often useful to see how
- * the pool is being used.  alloc_visualize() does that, but makes
- * assumptions about correctness (like the rest of the code) so if you
- * suspect corruption call alloc_check() first.
- *
- * Example:
- *     d = alloc_get(pool, 32*1024*1024, sizeof(*d), ALIGNOF(*d));
- *     if (!d) {
- *             fprintf(stderr, "Allocation failed!\n");
- *             if (!alloc_check(pool, 32*1024*1024))
- *                     errx(1, "Allocation pool is corrupt");
- *             alloc_visualize(stderr, pool, 32*1024*1024);
- *             exit(1);
- *     }
- */
-void alloc_visualize(FILE *out, void *pool, unsigned long poolsize);
-#endif /* ALLOC_H */
diff --git a/ccan/alloc/bitops.c b/ccan/alloc/bitops.c
deleted file mode 100644 (file)
index 78c2b24..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/* Licensed under LGPLv2.1+ - see LICENSE file for details */
-#include "bitops.h"
-#include "config.h"
-#include <ccan/build_assert/build_assert.h>
-#include <ccan/short_types/short_types.h>
-#include <ccan/ilog/ilog.h>
-#include <limits.h>
-
-unsigned int afls(unsigned long val)
-{
-       BUILD_ASSERT(sizeof(val) == sizeof(u32) || sizeof(val) == sizeof(u64));
-       if (sizeof(val) == sizeof(u32))
-               return ilog32(val);
-       else
-               return ilog64(val);
-}
-
/* FIXME: Move to bitops. */
/* Find-first-set for unsigned long: returns the 1-based index of the
 * lowest set bit, or 0 if @val is 0 (same contract as ffsl(3)). */
unsigned int affsl(unsigned long val)
{
#if HAVE_BUILTIN_FFSL
	/* This is significantly faster! */
	return __builtin_ffsl(val);
#else
	unsigned int r = 1;

	if (!val)
		return 0;
	/* Binary search: discard the zero half and halve the window
	 * (32, 16, 8, 4, 2, 1 bits) until the lowest set bit is found. */
	if (sizeof(long) == sizeof(u64)) {
		if (!(val & 0xffffffff)) {
			/* Workaround gcc warning on 32-bit:
			   error: right shift count >= width of type */
			u64 tmp = val;
			tmp >>= 32;
			val = tmp;
			r += 32;
		}
	}
	if (!(val & 0xffff)) {
		val >>= 16;
		r += 16;
	}
	if (!(val & 0xff)) {
		val >>= 8;
		r += 8;
	}
	if (!(val & 0xf)) {
		val >>= 4;
		r += 4;
	}
	if (!(val & 3)) {
		val >>= 2;
		r += 2;
	}
	if (!(val & 1)) {
		val >>= 1;
		r += 1;
	}
	return r;
#endif
}
-
/* Count the set bits in @val: compiler builtin where available, else a
 * SWAR fallback that sums adjacent 1-, 2-, 4-, ... bit fields in
 * parallel. */
unsigned int popcount(unsigned long val)
{
#if HAVE_BUILTIN_POPCOUNTL
	return __builtin_popcountl(val);
#else
	/* 64-bit longs need the wider masks. */
	if (sizeof(long) == sizeof(u64)) {
		u64 v = val;
		v = (v & 0x5555555555555555ULL)
			+ ((v >> 1) & 0x5555555555555555ULL);
		v = (v & 0x3333333333333333ULL)
			+ ((v >> 2) & 0x3333333333333333ULL);
		v = (v & 0x0F0F0F0F0F0F0F0FULL)
			+ ((v >> 4) & 0x0F0F0F0F0F0F0F0FULL);
		v = (v & 0x00FF00FF00FF00FFULL)
			+ ((v >> 8) & 0x00FF00FF00FF00FFULL);
		v = (v & 0x0000FFFF0000FFFFULL)
			+ ((v >> 16) & 0x0000FFFF0000FFFFULL);
		v = (v & 0x00000000FFFFFFFFULL)
			+ ((v >> 32) & 0x00000000FFFFFFFFULL);
		return v;
	}
	val = (val & 0x55555555ULL) + ((val >> 1) & 0x55555555ULL);
	val = (val & 0x33333333ULL) + ((val >> 2) & 0x33333333ULL);
	val = (val & 0x0F0F0F0FULL) + ((val >> 4) & 0x0F0F0F0FULL);
	val = (val & 0x00FF00FFULL) + ((val >> 8) & 0x00FF00FFULL);
	val = (val & 0x0000FFFFULL) + ((val >> 16) & 0x0000FFFFULL);
	return val;
#endif
}
-
/* Round @x up to the next multiple of @align (a power of 2). */
unsigned long align_up(unsigned long x, unsigned long align)
{
	unsigned long mask = align - 1;

	return (x + mask) & ~mask;
}
diff --git a/ccan/alloc/bitops.h b/ccan/alloc/bitops.h
deleted file mode 100644 (file)
index 3feeed2..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
/* Licensed under LGPLv2.1+ - see LICENSE file for details */
#ifndef CCAN_ALLOC_BITOPS_H
#define CCAN_ALLOC_BITOPS_H
/* Find-last-set: 1-based index of the highest set bit (0 if val == 0). */
unsigned int afls(unsigned long val);
/* Find-first-set: 1-based index of the lowest set bit (0 if val == 0). */
unsigned int affsl(unsigned long val);
/* Number of set bits in val. */
unsigned int popcount(unsigned long val);
/* Round x up to the next multiple of align (align must be a power of 2). */
unsigned long align_up(unsigned long x, unsigned long align);
#endif /* CCAN_ALLOC_BITOPS_H */
diff --git a/ccan/alloc/test/run-corrupt.c b/ccan/alloc/test/run-corrupt.c
deleted file mode 100644 (file)
index 3e7be17..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Example allocation which caused corruption. */
-#include <ccan/alloc/alloc.c>
-#include <ccan/alloc/bitops.c>
-#include <ccan/alloc/tiny.c>
-#include <ccan/tap/tap.h>
-#include <stdlib.h>
-
/* Replay the exact allocation sequence that once corrupted a pool
 * (per this file's header comment), running alloc_check() after
 * every step to catch any regression. */
int main(int argc, char *argv[])
{
	void *mem;

	plan_tests(7);

	/* 1179648 bytes = 1152k: the pool size from the original report. */
	mem = malloc(1179648);
	alloc_init(mem, 1179648);
	ok1(alloc_check(mem, 1179648));
	ok1(alloc_get(mem, 1179648, 48, 16));
	ok1(alloc_check(mem, 1179648));
	ok1(alloc_get(mem, 1179648, 53, 16));
	ok1(alloc_check(mem, 1179648));
	ok1(alloc_get(mem, 1179648, 53, 16));
	ok1(alloc_check(mem, 1179648));
	free(mem);

	return exit_status();
}
diff --git a/ccan/alloc/test/run-testsize.c b/ccan/alloc/test/run-testsize.c
deleted file mode 100644 (file)
index c70770c..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-#include <ccan/alloc/alloc.h>
-#include <ccan/tap/tap.h>
-#include <ccan/alloc/alloc.c>
-#include <ccan/alloc/bitops.c>
-#include <ccan/alloc/tiny.c>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <err.h>
-
/* Flip every byte in [p, p+size): used to scribble on allocations so a
 * later pass can detect whether the allocator clobbered them. */
static void invert_bytes(unsigned char *p, unsigned long size)
{
	/* BUGFIX: counter was unsigned int while size is unsigned long;
	 * if size ever exceeded UINT_MAX the loop would never terminate. */
	unsigned long i;

	for (i = 0; i < size; i++)
		p[i] ^= 0xFF;
}
-
/* Each live allocation p[idx] was requested with size idx, so
 * alloc_size() must report at least idx bytes for it. */
static bool sizes_ok(void *mem, unsigned long poolsize, void *p[], unsigned num)
{
	unsigned int idx;

	for (idx = 0; idx < num; idx++) {
		if (!p[idx])
			continue;
		if (alloc_size(mem, poolsize, p[idx]) < idx)
			return false;
	}
	return true;
}
-
/* Fill a pool with allocations of growing size, verify alloc_size()
 * answers and pool consistency, then free every second allocation and
 * re-verify. */
static void test_pool(unsigned long pool_size)
{
	unsigned int i, num;
	void *mem;
	void **p;
	bool flip = false;

	p = calloc(pool_size, sizeof(void *));
	mem = malloc(pool_size);
	/* BUGFIX: results were used unchecked; bail cleanly if the host
	 * is out of memory rather than dereferencing NULL. */
	if (!p || !mem)
		err(1, "allocating %lu-byte test pool", pool_size);

	alloc_init(mem, pool_size);

	/* Check that alloc_size() gives reasonable answers. */
	for (i = 0; i < pool_size; i = i * 3 / 2 + 1) {
		p[i] = alloc_get(mem, pool_size, i, 1);
		if (!p[i])
			break;
		invert_bytes(p[i], alloc_size(mem, pool_size, p[i]));
	}
	ok1(i < pool_size);
	num = i;
	ok1(alloc_check(mem, pool_size));
	ok1(sizes_ok(mem, pool_size, p, num));

	/* Free every second one. */
	for (i = 0; i < num; i = i * 3 / 2 + 1) {
		flip = !flip;
		if (flip) {
			/* Keep this one: invert again so its contents stay
			 * in a known state. */
			invert_bytes(p[i], alloc_size(mem,pool_size,p[i]));
			continue;
		}
		alloc_free(mem, pool_size, p[i]);
		p[i] = NULL;
	}
	ok1(alloc_check(mem, pool_size));
	ok1(sizes_ok(mem, pool_size, p, num));
	free(p);
	free(mem);
}
-
/* Run the pool test once above and once below MIN_USEFUL_SIZE, so both
 * the standard and the tiny allocator paths are exercised. */
int main(int argc, char *argv[])
{
	plan_tests(10);

	/* Large test. */
	test_pool(MIN_USEFUL_SIZE * 2);

	/* Small test. */
	test_pool(MIN_USEFUL_SIZE / 2);

	return exit_status();
}
diff --git a/ccan/alloc/test/run-tiny-encode.c b/ccan/alloc/test/run-tiny-encode.c
deleted file mode 100644 (file)
index 8f54862..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-#include <ccan/tap/tap.h>
-#include "config.h"
-#include <ccan/alloc/tiny.c>
-#include <ccan/alloc/bitops.c>
-#include <stdlib.h>
-#include <err.h>
-
-/* Test encoding and decoding. */
-#define ARR_SIZE 10
-
-int main(void)
-{
-       unsigned char array[ARR_SIZE];
-       unsigned int i, prev;
-
-       plan_tests(405);
-
-       prev = 0;
-       /* Test encode_length */
-       for (i = 1; i < 0x8000000; i *= 2) {
-               ok1(encode_length(i-1) >= prev);
-               ok1(encode_length(i) >= encode_length(i-1));
-               ok1(encode_length(i+1) >= encode_length(i));
-               prev = encode_length(i);
-       }
-
-       /* Test it against actual encoding return val. */
-       for (i = 1; i < 0x8000000; i *= 2) {
-               ok1(encode_length(i-1) == encode(i - 1 + MIN_BLOCK_SIZE,
-                                                false, array));
-               ok1(encode_length(i) == encode(i + MIN_BLOCK_SIZE,
-                                              false, array));
-               ok1(encode_length(i+1) == encode(i + 1 + MIN_BLOCK_SIZE,
-                                                false, array));
-       }
-
-       /* Test encoder vs. decoder. */
-       for (i = 1; i < 0x8000000; i *= 2) {
-               unsigned long hdrlen, len;
-               bool free;
-
-               hdrlen = encode(i - 1 + MIN_BLOCK_SIZE, false, array);
-               ok1(decode(&len, &free, array) == hdrlen);
-               ok1(len == i - 1 + MIN_BLOCK_SIZE);
-               ok1(free == false);
-
-               hdrlen = encode(i + MIN_BLOCK_SIZE, true, array);
-               ok1(decode(&len, &free, array) == hdrlen);
-               ok1(len == i + MIN_BLOCK_SIZE);
-               ok1(free == true);
-
-               hdrlen = encode(i + 1 + MIN_BLOCK_SIZE, true, array);
-               ok1(decode(&len, &free, array) == hdrlen);
-               ok1(len == i + 1 + MIN_BLOCK_SIZE);
-               ok1(free == true);
-       }
-
-       return exit_status();
-}
diff --git a/ccan/alloc/test/run.c b/ccan/alloc/test/run.c
deleted file mode 100644 (file)
index f9981e7..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-#include <ccan/alloc/alloc.h>
-#include <ccan/tap/tap.h>
-#include <ccan/alloc/alloc.c>
-#include <ccan/alloc/bitops.c>
-#include <ccan/alloc/tiny.c>
-#include <stdlib.h>
-#include <err.h>
-
-#define sort(p, num, cmp) \
-       qsort((p), (num), sizeof(*p), (int(*)(const void *, const void *))cmp)
-
-static int addr_cmp(void **a, void **b)
-{
-       return (char *)(*a) - (char *)(*b);
-}
-
-static bool unique(void *p[], unsigned int num)
-{
-       unsigned int i;
-
-       for (i = 1; i < num; i++)
-               if (p[i] == p[i-1])
-                       return false;
-       return true;
-}      
-
-static bool free_every_second_one(void *mem, unsigned int num, 
-                                 unsigned long pool_size, void *p[])
-{
-       unsigned int i;
-
-       /* Free every second one. */
-       for (i = 0; i < num; i += 2) {
-               alloc_free(mem, pool_size, p[i]);
-       }
-       if (!alloc_check(mem, pool_size))
-               return false;
-       for (i = 1; i < num; i += 2) {
-               alloc_free(mem, pool_size, p[i]);
-       }
-       if (!alloc_check(mem, pool_size))
-               return false;
-       return true;
-}
-
-static void test(unsigned int pool_size)
-{
-       void *mem;
-       unsigned int i, num, max_size;
-       void **p = calloc(pool_size, sizeof(*p));
-       unsigned alloc_limit = pool_size / 2;
-
-       mem = malloc(pool_size);
-
-       /* Small pool, all allocs fail, even 0-length. */
-       alloc_init(mem, 0);
-       ok1(alloc_check(mem, 0));
-       ok1(alloc_get(mem, 0, 1, 1) == NULL);
-       ok1(alloc_get(mem, 0, 128, 1) == NULL);
-       ok1(alloc_get(mem, 0, 0, 1) == NULL);
-
-       /* Free of NULL should work. */
-       alloc_free(mem, 0, NULL);
-
-       alloc_init(mem, pool_size);
-       ok1(alloc_check(mem, pool_size));
-       /* Find largest allocation which works. */
-       for (max_size = pool_size + 1; max_size; max_size--) {
-               p[0] = alloc_get(mem, pool_size, max_size, 1);
-               if (p[0])
-                       break;
-       }
-       ok1(max_size < pool_size);
-       ok1(max_size > 0);
-       ok1(alloc_check(mem, pool_size));
-       ok1(alloc_size(mem, pool_size, p[0]) >= max_size);
-
-       /* Free it, should be able to reallocate it. */
-       alloc_free(mem, pool_size, p[0]);
-       ok1(alloc_check(mem, pool_size));
-
-       p[0] = alloc_get(mem, pool_size, max_size, 1);
-       ok1(p[0]);
-       ok1(alloc_size(mem, pool_size, p[0]) >= max_size);
-       ok1(alloc_check(mem, pool_size));
-       alloc_free(mem, pool_size, p[0]);
-       ok1(alloc_check(mem, pool_size));
-
-       /* Allocate a whole heap. */
-       for (i = 0; i < pool_size; i++) {
-               p[i] = alloc_get(mem, pool_size, 1, 1);
-               if (!p[i])
-                       break;
-       }
-
-       /* Uncomment this for a more intuitive view of what the
-        * allocator looks like after all these 1 byte allocs. */
-#if 0
-       alloc_visualize(stderr, mem, pool_size);
-#endif
-
-       num = i;
-       /* Can't allocate this many. */
-       ok1(num != pool_size);
-       ok1(alloc_check(mem, pool_size));
-
-       /* Sort them. */
-       sort(p, num, addr_cmp);
-
-       /* Uniqueness check */
-       ok1(unique(p, num));
-
-       ok1(free_every_second_one(mem, num, pool_size, p));
-       ok1(alloc_check(mem, pool_size));
-
-       /* Should be able to reallocate max size. */
-       p[0] = alloc_get(mem, pool_size, max_size, 1);
-       ok1(p[0]);
-       ok1(alloc_check(mem, pool_size));
-       ok1(alloc_size(mem, pool_size, p[0]) >= max_size);
-
-       /* Re-initializing should be the same as freeing everything */
-       alloc_init(mem, pool_size);
-       ok1(alloc_check(mem, pool_size));
-       p[0] = alloc_get(mem, pool_size, max_size, 1);
-       ok1(p[0]);
-       ok1(alloc_size(mem, pool_size, p[0]) >= max_size);
-       ok1(alloc_check(mem, pool_size));
-       alloc_free(mem, pool_size, p[0]);
-       ok1(alloc_check(mem, pool_size));
-
-       /* Alignment constraints should be met, as long as powers of two */
-       for (i = 0; (1 << i) < alloc_limit; i++) {
-               p[i] = alloc_get(mem, pool_size, i, 1 << i);
-               ok1(p[i]);
-               ok1(((char *)p[i] - (char *)mem) % (1 << i) == 0);
-               ok1(alloc_check(mem, pool_size));
-               ok1(alloc_size(mem, pool_size, p[i]) >= i);
-       }
-
-       for (i = 0; (1 << i) < alloc_limit; i++) {
-               alloc_free(mem, pool_size, p[i]);
-               ok1(alloc_check(mem, pool_size));
-       }
-
-       /* Alignment constraints for a single-byte allocation. */
-       for (i = 0; (1 << i) < alloc_limit; i++) {
-               p[0] = alloc_get(mem, pool_size, 1, 1 << i);
-               ok1(p[0]);
-               ok1(((char *)p[0] - (char *)mem) % (1 << i) == 0);
-               ok1(alloc_check(mem, pool_size));
-               ok1(alloc_size(mem, pool_size, p[0]) >= 1);
-               alloc_free(mem, pool_size, p[0]);
-               ok1(alloc_check(mem, pool_size));
-       }
-
-       /* Alignment check for a 0-byte allocation.  Corner case. */
-       p[0] = alloc_get(mem, pool_size, 0, alloc_limit);
-       ok1(alloc_check(mem, pool_size));
-       ok1(alloc_size(mem, pool_size, p[0]) < pool_size);
-       alloc_free(mem, pool_size, p[0]);
-       ok1(alloc_check(mem, pool_size));
-
-       free(mem);
-       free(p);
-}
-
-int main(int argc, char *argv[])
-{
-       plan_tests(440);
-
-       /* Large test. */
-       test(MIN_USEFUL_SIZE * 2);
-
-       /* Small test. */
-       test(MIN_USEFUL_SIZE / 2);
-
-       return exit_status();
-}
diff --git a/ccan/alloc/tiny.c b/ccan/alloc/tiny.c
deleted file mode 100644 (file)
index ffd17c6..0000000
+++ /dev/null
@@ -1,431 +0,0 @@
-/* Licensed under LGPLv2.1+ - see LICENSE file for details */
-#include "tiny.h"
-#include "bitops.h"
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-
-/* One byte header, one byte data. */
-#define MIN_BLOCK_SIZE 2
-
-/* Bit 7 (in any byte) == start or end. */
-#define TERM_BIT 0x80
-/* Bit 6 (first and last byte) == one byte long. */
-#define SINGLE_BYTE 0x40
-/* Bit 5 (of first byte) == "is this block free?" */
-#define FREE_BIT 0x20
-
-#define MAX_FREE_CACHED_SIZE 256
-
-/* Val is usually offset by MIN_BLOCK_SIZE here. */
-static unsigned encode_length(unsigned long val)
-{
-       unsigned int bits = afls(val);
-       /* 5 bits in first byte. */
-       if (bits <= 5)
-               return 1;
-       /* 6 bits in last byte, 7 bits in middle ones. */
-       return 2 + (bits - 5) / 7;
-}
-
-/* Header is included in length, so we might need an extra byte. */
-static unsigned encode_len_with_header(unsigned long len)
-{
-       unsigned int hdrlen = 1;
-
-       assert(len);
-       while (encode_length(len + hdrlen - MIN_BLOCK_SIZE) != hdrlen)
-               hdrlen = encode_length(len + hdrlen - MIN_BLOCK_SIZE);
-
-       return hdrlen;
-}
-
-/* Encoding can be read from front or back; 0 is invalid at either
- * start or end.  Returns bytes used for header. */
-static unsigned encode(unsigned long len, bool free, unsigned char arr[])
-{
-       unsigned int hdrlen = 1;
-
-       /* We can never have a length < MIN_BLOCK_SIZE. */
-       assert(len >= MIN_BLOCK_SIZE);
-       len -= MIN_BLOCK_SIZE;
-
-       /* First byte always contains free bit. */
-       arr[0] = TERM_BIT | (free ? FREE_BIT : 0);
-       /* Also holds 5 bits of data (0 - 31) */
-       arr[0] |= (len & 0x1F);
-       len >>= 5;
-
-       /* One byte only? */
-       if (!len) {
-               arr[0] |= SINGLE_BYTE;
-               return hdrlen;
-       }
-
-       /* Middle bytes. */
-       while (len >= (1 << 6)) {
-               /* Next 7 data bits */
-               arr[hdrlen++] = (len & 0x7F);
-               len >>= 7;
-       }
-       arr[hdrlen++] = (len | TERM_BIT);
-       return hdrlen;
-}
-
-/* Returns bytes used for header. */
-static unsigned decode(unsigned long *len, bool *free, const unsigned char *arr)
-{
-       unsigned int hdrlen = 0, bits = 5;
-
-       /* Free flag is in bit 5 */
-       *free = (arr[hdrlen] & FREE_BIT);
-       /* Bottom five bits are data. */
-       *len = (arr[hdrlen] & 0x1f);
-       if (!(arr[hdrlen++] & SINGLE_BYTE)) {
-               /* Multi-byte encoding? */
-               while (!(arr[hdrlen] & TERM_BIT)) {
-                       /* 7 more data bits. */
-                       *len |= (arr[hdrlen] & 0x7fUL) << bits;
-                       hdrlen++;
-                       bits += 7;
-               }
-               /* Final byte has 6 bits. */
-               *len |= (arr[hdrlen] & 0x3fUL) << bits;
-               hdrlen++;
-       }
-
-       *len += MIN_BLOCK_SIZE;
-       return hdrlen;
-}
-
-/* We keep a helper array for freed mem, one byte per k. */
-static unsigned long free_array_size(unsigned long poolsize)
-{
-       return poolsize / 1024;
-}
-
-/* We have series of 69 free sizes like so:
- * 1, 2, 3, 4.  6, 8, 10, 12, 14, 16. 20, 24, 28, 32... 252.
- */
-static unsigned long free_array_off(unsigned long size)
-{
-       unsigned long off;
-
-       if (size <= 4)
-               off = size - 1;
-       else if (size <= 16)
-               off = size / 2 + 1;
-       else
-               off = size / 4 + 5;
-
-       off *= 3;
-       return off;
-}
-
-void tiny_alloc_init(void *pool, unsigned long poolsize)
-{
-       /* We start with free array, and then the rest is free. */
-       unsigned long arrsize = free_array_size(poolsize);
-
-       /* Do nothing with 1 byte or less! */
-       if (poolsize < MIN_BLOCK_SIZE)
-               return;
-
-       memset(pool, 0, arrsize);
-       encode(poolsize - arrsize, true, (unsigned char *)pool + arrsize);
-}
-
-/* Walk through and try to coalesce */
-static bool try_coalesce(unsigned char *pool, unsigned long poolsize)
-{
-       unsigned long len, prev_off = 0, prev_len = 0, off;
-       bool free, prev_free = false, coalesced = false;
-
-       off = free_array_size(poolsize);
-       do {
-               decode(&len, &free, pool + off);
-               if (free && prev_free) {
-                       prev_len += len;
-                       encode(prev_len, true, pool + prev_off);
-                       coalesced = true;
-               } else {
-                       prev_free = free;
-                       prev_off = off;
-                       prev_len = len;
-               }
-               off += len;
-       } while (off < poolsize);
-
-       /* Clear the free array. */
-       if (coalesced)
-               memset(pool, 0, free_array_size(poolsize));
-
-       return coalesced;
-}
-
-static bool long_enough(unsigned long offset, unsigned long len,
-                       unsigned long size, unsigned long align)
-{
-       unsigned long end = offset + len;
-
-       offset += encode_len_with_header(len);
-       offset = align_up(offset, align);
-       return offset + size <= end;
-}
-
-static void add_to_free_array(unsigned char *arr,
-                             unsigned long poolsize,
-                             unsigned long size,
-                             unsigned long off)
-{
-       unsigned long fa_off;
-
-       if (size >= MAX_FREE_CACHED_SIZE)
-               return;
-
-       for (fa_off = free_array_off(size);
-            fa_off + 3 < free_array_size(poolsize);
-            fa_off += free_array_off(MAX_FREE_CACHED_SIZE)) {
-               if (!arr[fa_off] && !arr[fa_off+1] && !arr[fa_off+2]) {
-                       arr[fa_off] = (off >> 16);
-                       arr[fa_off+1] = (off >> 8);
-                       arr[fa_off+2] = off;
-                       break;
-               }
-       }
-}
-
-void *tiny_alloc_get(void *pool, unsigned long poolsize,
-                    unsigned long size, unsigned long align)
-{
-       unsigned long arrsize = free_array_size(poolsize);
-       unsigned long len, off, actual, hdr, free_bucket;
-       long fa_off;
-       unsigned char *arr = pool;
-       bool free, coalesced = false;
-
-       /* We can't do anything with tiny pools. */
-       if (poolsize < MIN_BLOCK_SIZE)
-               return NULL;
-
-       /* We don't do zero-allocs; allows 1 more offset in encoding. */
-       if (!size)
-               size = 1;
-
-       /* Look for entries in free array, starting from right size up. */
-       for (free_bucket = free_array_off(size);
-            free_bucket < free_array_off(MAX_FREE_CACHED_SIZE);
-            free_bucket += 3) {
-               for (fa_off = free_bucket;
-                    fa_off + 3 < free_array_size(poolsize);
-                    fa_off += free_array_off(MAX_FREE_CACHED_SIZE)) {
-                       off = ((unsigned long)arr[fa_off]) << 16
-                               | ((unsigned long)arr[fa_off+1]) << 8
-                               | ((unsigned long)arr[fa_off+2]);
-                       if (!off)
-                               continue;
-
-                       decode(&len, &free, arr + off);
-                       if (long_enough(off, len, size, align)) {
-                               /* Remove it. */
-                               memset(&arr[fa_off], 0, 3);
-                               goto found;
-                       }
-               }
-       }
-
-again:
-       off = arrsize;
-
-       decode(&len, &free, arr + off);
-       while (!free || !long_enough(off, len, size, align)) {
-               /* Refill free array as we go. */
-               if (free && coalesced)
-                       add_to_free_array(arr, poolsize, len, off);
-
-               off += len;
-               /* Hit end? */
-               if (off == poolsize) {
-                       if (!coalesced && try_coalesce(pool, poolsize)) {
-                               coalesced = true;
-                               goto again;
-                       }
-                       return NULL;
-               }
-               decode(&len, &free, arr + off);
-       }
-
-found:
-       /* We have a free block.  Since we walk from front, take far end. */
-       actual = ((off + len - size) & ~(align - 1));
-       hdr = actual - encode_len_with_header(off + len - actual);
-
-
-       /* Do we have enough room to split? */
-       if (hdr - off >= MIN_BLOCK_SIZE) {
-               encode(hdr - off, true, arr + off);
-               add_to_free_array(arr, poolsize, hdr - off, off);
-       } else {
-               hdr = off;
-       }
-
-       /* Make sure that we are all-zero up to actual, so we can walk back
-        * and find header. */
-       memset(arr + hdr, 0, actual - hdr);
-
-       /* Create header for allocated block. */
-       encode(off + len - hdr, false, arr + hdr);
-
-       return arr + actual;
-}
-
-static unsigned char *to_hdr(void *p)
-{
-       unsigned char *hdr = p;
-
-       /* Walk back to find end of header. */
-       while (!*(--hdr));
-       assert(*hdr & TERM_BIT);
-
-       /* Now walk back to find start of header. */
-       if (!(*hdr & SINGLE_BYTE)) {
-               while (!(*(--hdr) & TERM_BIT));
-       }
-       return hdr;
-}
-
-void tiny_alloc_free(void *pool, unsigned long poolsize, void *freep)
-{
-       unsigned long len;
-       unsigned char *arr = pool;
-       unsigned char *hdr;
-       bool free;
-
-       /* Too small to do anything. */
-       if (poolsize < MIN_BLOCK_SIZE)
-               return;
-
-       hdr = to_hdr(freep);
-
-       decode(&len, &free, hdr);
-       assert(!free);
-       hdr[0] |= FREE_BIT;
-
-       /* If an empty slot, put this in free array. */
-       add_to_free_array(pool, poolsize, len, hdr - arr);
-}
-
-unsigned long tiny_alloc_size(void *pool, unsigned long poolsize, void *p)
-{
-       unsigned char *hdr = to_hdr(p);
-       unsigned long len, hdrlen;
-       bool free;
-
-       hdrlen = decode(&len, &free, hdr);
-       return len - hdrlen;
-}
-
-/* Useful for gdb breakpoints. */
-static bool tiny_check_fail(void)
-{
-       return false;
-}
-
-static bool check_decode(const unsigned char *arr, unsigned long len)
-{
-       unsigned int i;
-
-       if (len == 0)
-               return tiny_check_fail();
-       if (!(arr[0] & TERM_BIT))
-               return tiny_check_fail();
-       if (arr[0] & SINGLE_BYTE)
-               return true;
-       for (i = 1; i < len; i++) {
-               if (arr[i] & TERM_BIT)
-                       return true;
-       }
-       return tiny_check_fail();
-}
-
-bool tiny_alloc_check(void *pool, unsigned long poolsize)
-{
-       unsigned long arrsize = free_array_size(poolsize);
-       unsigned char *arr = pool;
-       unsigned long len, off, hdrlen;
-       unsigned long i, freearr[arrsize], num_freearr = 0;
-       bool free;
-
-       if (poolsize < MIN_BLOCK_SIZE)
-               return true;
-
-       for (i = 0; i + 3 < free_array_size(poolsize); i += 3) {
-               off = ((unsigned long)arr[i]) << 16
-                       | ((unsigned long)arr[i+1]) << 8
-                       | ((unsigned long)arr[i+2]);
-               if (!off)
-                       continue;
-
-               if (off >= poolsize)
-                       return tiny_check_fail();
-               freearr[num_freearr++] = off;
-       }
-
-       for (off = arrsize; off < poolsize; off += len) {
-               /* We should have a valid header. */
-               if (!check_decode(arr + off, poolsize - off))
-                       return false;
-               hdrlen = decode(&len, &free, arr + off);
-               if (off + len > poolsize)
-                       return tiny_check_fail();
-               if (hdrlen != encode_length(len - MIN_BLOCK_SIZE))
-                       return tiny_check_fail();
-               for (i = 0; i < num_freearr; i++) {
-                       if (freearr[i] == off) {
-                               if (!free)
-                                       return tiny_check_fail();
-                               memmove(&freearr[i], &freearr[i+1],
-                                       (num_freearr-- - (i + 1))
-                                       * sizeof(freearr[i]));
-                               break;
-                       }
-               }
-       }
-
-       /* Now we should have found everything in freearr. */
-       if (num_freearr)
-               return tiny_check_fail();
-
-       /* Now check that sizes are correct. */
-       for (i = 0; i + 3 < free_array_size(poolsize); i += 3) {
-               unsigned long fa_off;
-
-               off = ((unsigned long)arr[i]) << 16
-                       | ((unsigned long)arr[i+1]) << 8
-                       | ((unsigned long)arr[i+2]);
-               if (!off)
-                       continue;
-
-               decode(&len, &free, arr + off);
-
-               /* Would we expect to find this length in this bucket? */
-               if (len >= MAX_FREE_CACHED_SIZE)
-                       return tiny_check_fail();
-
-               for (fa_off = free_array_off(len);
-                    fa_off + 3 < free_array_size(poolsize);
-                    fa_off += free_array_off(MAX_FREE_CACHED_SIZE)) {
-                       if (fa_off == i)
-                               break;
-               }
-               if (fa_off != i)
-                       return tiny_check_fail();
-       }
-
-       return true;
-}
-
-/* FIXME: Implement. */
-void tiny_alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
-{
-}
diff --git a/ccan/alloc/tiny.h b/ccan/alloc/tiny.h
deleted file mode 100644 (file)
index 5ed4ee1..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/* Licensed under LGPLv2.1+ - see LICENSE file for details */
-#ifndef CCAN_TINY_H
-#define CCAN_TINY_H
-#include <stdbool.h>
-#include <stdio.h>
-
-void tiny_alloc_init(void *pool, unsigned long poolsize);
-void *tiny_alloc_get(void *pool, unsigned long poolsize,
-                    unsigned long size, unsigned long align);
-void tiny_alloc_free(void *pool, unsigned long poolsize, void *free);
-unsigned long tiny_alloc_size(void *pool, unsigned long poolsize, void *p);
-bool tiny_alloc_check(void *pool, unsigned long poolsize);
-void tiny_alloc_visualize(FILE *out, void *pool, unsigned long poolsize);
-
-#endif /* CCAN_TINY_H */
index ca8cd16c597b785733cac127d8316ab07d663f24..b4cf67fc8bfa559492fdc34f9147ad5aa744d686 100644 (file)
@@ -90,7 +90,7 @@ int main(int argc, char *argv[])
                return 1;
 
        if (strcmp(argv[1], "depends") == 0) {
-               printf("ccan/alloc\n");
+               printf("ccan/antithread/alloc\n");
                printf("ccan/err\n");
                printf("ccan/list\n");
                printf("ccan/noerr\n");
diff --git a/ccan/antithread/alloc/LICENSE b/ccan/antithread/alloc/LICENSE
new file mode 120000 (symlink)
index 0000000..4d0b239
--- /dev/null
@@ -0,0 +1 @@
+../../../licenses/LGPL-2.1
\ No newline at end of file
diff --git a/ccan/antithread/alloc/_info b/ccan/antithread/alloc/_info
new file mode 100644 (file)
index 0000000..5ad1800
--- /dev/null
@@ -0,0 +1,115 @@
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+
+/**
+ * antithread/alloc - memory allocator routines
+ *
+ * The alloc module implements a simple allocator which you can use to
+ * dynamically allocate space within a region of memory.  This can be useful
+ * for suballocations within a given region, or a memory-mapped file.
+ *
+ * All metadata is kept within the memory handed to the allocator: you only
+ * need hand the pointer and the size of the memory to each call.
+ *
+ * The region contents is always in offsets, so it can be mapped in different
+ * places, but is not endian-safe.
+ *
+ * Example:
+ *     #include <sys/mman.h>
+ *     #include <unistd.h>
+ *     #include <sys/types.h>
+ *     #include <err.h>
+ *     #include <sys/stat.h>
+ *     #include <fcntl.h>
+ *     #include <string.h>
+ *     #include <stdlib.h>
+ *     #include <ccan/antithread/alloc/alloc.h>
+ *
+ *     static void usage(const char *name)
+ *     {
+ *             errx(1, "Usage: %s --create <mapfile>\n"
+ *                  " %s --check <mapfile>\n"
+ *                  " %s --alloc <mapfile>\n"
+ *                  " %s --free=<offset> <mapfile>\n", name, name, name, name);
+ *     }
+ *
+ *     // Create a memory mapped file, and allocate from within it
+ *     int main(int argc, char *argv[])
+ *     {
+ *             void *a, *p;
+ *             int fd;
+ *             enum { CREATE, CHECK, ALLOC, FREE } cmd;
+ *
+ *             if (argc != 3)
+ *                     usage(argv[0]);
+ *
+ *             if (strcmp(argv[1], "--create") == 0)
+ *                     cmd = CREATE;
+ *             else if (strcmp(argv[1], "--check") == 0)
+ *                     cmd = CHECK;
+ *             else if (strcmp(argv[1], "--alloc") == 0)
+ *                     cmd = ALLOC;
+ *             else if (strncmp(argv[1], "--free=", strlen("--free=")) == 0)
+ *                     cmd = FREE;
+ *             else
+ *                     usage(argv[0]);
+ *
+ *             if (cmd == CREATE) {
+ *                     fd = open(argv[2], O_RDWR|O_CREAT|O_EXCL, 0600);
+ *                     if (fd < 0)
+ *                             err(1, "Could not create %s", argv[2]);
+ *                     if (ftruncate(fd, 1048576) != 0)
+ *                             err(1, "Could not set length on %s", argv[2]);
+ *             } else {
+ *                     fd = open(argv[2], O_RDWR);
+ *                     if (fd < 0)
+ *                             err(1, "Could not open %s", argv[2]);
+ *             }
+ *
+ *             a = mmap(NULL, 1048576, PROT_READ|PROT_WRITE, MAP_SHARED, fd,0);
+ *             if (a == MAP_FAILED)
+ *                     err(1, "Could not map %s", argv[2]);
+ *
+ *             switch (cmd) {
+ *             case CREATE:
+ *                     alloc_init(a, 1048576);
+ *                     break;
+ *             case CHECK:
+ *                     if (!alloc_check(a, 1048576))
+ *                             err(1, "Region is corrupt");
+ *                     break;
+ *             case ALLOC:
+ *                     p = alloc_get(a, 1048576, 1024, 16);
+ *                     if (!p)
+ *                             errx(1, "Could not allocate");
+ *                     printf("%zu\n", (char *)p - (char *)a);
+ *                     break;
+ *             case FREE:
+ *                     p = (char *)a + atol(argv[1] + strlen("--free="));
+ *                     alloc_free(a, 1048576, p);
+ *                     break;
+ *             }
+ *             return 0;
+ *     }
+ *
+ * License: LGPL (v2.1 or any later version)
+ * Author: Rusty Russell <rusty@rustcorp.com.au>
+ */
+int main(int argc, char *argv[])
+{
+       if (argc != 2)
+               return 1;
+
+       if (strcmp(argv[1], "depends") == 0) {
+               printf("ccan/alignof\n");
+               printf("ccan/build_assert\n");
+               printf("ccan/compiler\n");
+               printf("ccan/ilog\n");
+               printf("ccan/likely\n");
+               printf("ccan/short_types\n");
+               return 0;
+       }
+
+       return 1;
+}
diff --git a/ccan/antithread/alloc/alloc.c b/ccan/antithread/alloc/alloc.c
new file mode 100644 (file)
index 0000000..1b36aa4
--- /dev/null
@@ -0,0 +1,1239 @@
+/* Licensed under LGPLv2.1+ - see LICENSE file for details */
+#include <unistd.h>
+#include <stdint.h>
+#include <string.h>
+#include <limits.h>
+#include <assert.h>
+#include <stdlib.h>
+#include "alloc.h"
+#include "bitops.h"
+#include "tiny.h"
+#include <ccan/build_assert/build_assert.h>
+#include <ccan/likely/likely.h>
+#include <ccan/alignof/alignof.h>
+#include <ccan/short_types/short_types.h>
+#include <ccan/compiler/compiler.h>
+#include "config.h"
+
+/*
+   Inspired by (and parts taken from) Andrew Tridgell's alloc_mmap:
+   http://samba.org/~tridge/junkcode/alloc_mmap/
+
+   Copyright (C) Andrew Tridgell 2007
+
+   This library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2 of the License, or (at your option) any later version.
+
+   This library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with this library; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/* We divide the pool into this many large pages (nearest power of 2) */
+#define MAX_LARGE_PAGES (256UL)
+
+/* 32 small pages == 1 large page. */
+#define BITS_FROM_SMALL_TO_LARGE_PAGE 5
+
+#define MAX_SMALL_PAGES (MAX_LARGE_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)
+
+/* Smallest pool size for this scheme: 128-byte small pages.  That's
+ * 9/13% overhead for 32/64 bit. */
+#define MIN_USEFUL_SIZE (MAX_SMALL_PAGES * 128)
+
+/* Every 4 buckets, we jump up a power of 2. ...8 10 12 14 16 20 24 28 32... */
+#define INTER_BUCKET_SPACE 4
+
+#define SMALL_PAGES_PER_LARGE_PAGE (1 << BITS_FROM_SMALL_TO_LARGE_PAGE)
+
+/* FIXME: Figure this out properly. */
+#define MAX_SIZE (1 << 30)
+
+/* How few objects to fit in a page before using a larger one? (8) */
+#define MAX_PAGE_OBJECT_ORDER  3
+
+#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
+
+/* Per-bucket allocation state; lives in the header's variable-length
+ * bs[] array, one entry per bucket. */
+struct bucket_state {
+       u32 elements_per_page;  /* How many objects fit on one page. */
+       u16 page_list;          /* First page with free slots (page number). */
+       u16 full_list;          /* First completely-full page (page number). */
+};
+
+/* Pool header, stored at the very start of the pool. */
+struct header {
+       /* Bitmap of which pages are large. */
+       unsigned long pagesize[MAX_LARGE_PAGES / BITS_PER_LONG];
+
+       /* List of unused small/large pages. */
+       u16 small_free_list;
+       u16 large_free_list;
+
+       /* List of huge allocs (byte offset of first huge_alloc; 0 = none). */
+       unsigned long huge;
+
+       /* This is less defined: we have INTER_BUCKET_SPACE (4) buckets
+        * for each power of 2 */
+       struct bucket_state bs[1];
+};
+
+/* Tracking record for a huge (multi-page) allocation.  next/prev link
+ * the list by byte offset from the pool start (0 terminates). */
+struct huge_alloc {
+       unsigned long next, prev;
+       unsigned long off, len;         /* Byte offset and length in pool. */
+};
+
+/* Header at the front of every in-use page. */
+struct page_header {
+       u16 next, prev;         /* Page-number links for free/bucket lists. */
+       /* FIXME: We can just count all-0 and all-1 used[] elements. */
+       unsigned elements_used : 25;
+       unsigned bucket : 7;
+       unsigned long used[1]; /* One bit per element. */
+};
+
+/*
+ * Every 4 buckets, the size doubles.
+ * Between buckets, sizes increase linearly.
+ *
+ * eg. bucket 40 = 2^10                        = 1024
+ *     bucket 41 = 2^10 + 1*2^10/4     = 1024 + 256
+ *     bucket 42 = 2^10 + 2*2^10/4     = 1024 + 512
+ *     bucket 43 = 2^10 + 3*2^10/4     = 1024 + 768
+ *     bucket 44 = 2^11                        = 2048
+ *
+ * Care is taken to handle low numbered buckets, at cost of overflow.
+ */
+static unsigned long bucket_to_size(unsigned int bucket)
+{
+       unsigned long base = 1UL << (bucket / INTER_BUCKET_SPACE);
+       return base + ((bucket % INTER_BUCKET_SPACE)
+                      << (bucket / INTER_BUCKET_SPACE))
+               / INTER_BUCKET_SPACE;
+}
+
+/*
+ * Say size is 10.
+ *   fls(size/2) == 3.  1 << 3 == 8, so we're 2 too large, out of a possible
+ * 8 too large.  That's 1/4 of the way to the next power of 2 == 1 bucket.
+ *
+ * We make sure we round up.  Note that this fails on 32 bit at size
+ * 1879048193 (around bucket 120).
+ */
+static unsigned int size_to_bucket(unsigned long size)
+{
+       /* afls() is find-last-set from bitops.h. */
+       unsigned int base = afls(size/2);
+       unsigned long overshoot;
+
+       overshoot = size - (1UL << base);
+       return base * INTER_BUCKET_SPACE
+               + ((overshoot * INTER_BUCKET_SPACE + (1UL << base)-1) >> base);
+}
+
+/* Small-page size (in bits) chosen so the pool splits into MAX_SMALL_PAGES. */
+static unsigned int small_page_bits(unsigned long poolsize)
+{
+       return afls(poolsize / MAX_SMALL_PAGES - 1);
+}
+
+/* Address of the page header for a given (small) page number. */
+static struct page_header *from_pgnum(struct header *head,
+                                     unsigned long pgnum,
+                                     unsigned sp_bits)
+{
+       return (struct page_header *)((char *)head + (pgnum << sp_bits));
+}
+
+/* (Small) page number containing pointer p. */
+static u16 to_pgnum(struct header *head, void *p, unsigned sp_bits)
+{
+       return ((char *)p - (char *)head) >> sp_bits;
+}
+
+/* Bytes needed for a used[] bitmap of num_elements bits, long-aligned. */
+static size_t used_size(unsigned int num_elements)
+{
+       return align_up(num_elements, BITS_PER_LONG) / CHAR_BIT;
+}
+
+/*
+ * We always align the first entry to the lower power of 2.
+ * eg. the 12-byte bucket gets 8-byte aligned.  The 4096-byte bucket
+ * gets 4096-byte aligned.
+ */
+static unsigned long page_header_size(unsigned int align_bits,
+                                     unsigned long num_elements)
+{
+       unsigned long size;
+
+       size = sizeof(struct page_header)
+               - sizeof(((struct page_header *)0)->used)
+               + used_size(num_elements);
+       return align_up(size, 1UL << align_bits);
+}
+
+/* Push ph onto the front of a doubly-linked page list (0 terminates). */
+static void add_to_list(struct header *head,
+                       u16 *list, struct page_header *ph, unsigned sp_bits)
+{
+       unsigned long h = *list, offset = to_pgnum(head, ph, sp_bits);
+
+       ph->next = h;
+       if (h) {
+               struct page_header *prev = from_pgnum(head, h, sp_bits);
+               assert(prev->prev == 0);
+               prev->prev = offset;
+       }
+       *list = offset;
+       ph->prev = 0;
+}
+
+/* Unlink ph from the page list it is currently on. */
+static void del_from_list(struct header *head,
+                         u16 *list, struct page_header *ph, unsigned sp_bits)
+{
+       /* Front of list? */
+       if (ph->prev == 0) {
+               *list = ph->next;
+       } else {
+               struct page_header *prev = from_pgnum(head, ph->prev, sp_bits);
+               prev->next = ph->next;
+       }
+       if (ph->next != 0) {
+               struct page_header *next = from_pgnum(head, ph->next, sp_bits);
+               next->prev = ph->prev;
+       }
+}
+
+/* Pop the front page off a list; returns 0 if the list is empty. */
+static u16 pop_from_list(struct header *head,
+                                  u16 *list,
+                                  unsigned int sp_bits)
+{
+       u16 h = *list;
+       /* Safe when h == 0: ph is only dereferenced inside the if below. */
+       struct page_header *ph = from_pgnum(head, h, sp_bits);
+
+       if (likely(h)) {
+               *list = ph->next;
+               if (*list)
+                       from_pgnum(head, *list, sp_bits)->prev = 0;
+       }
+       return h;
+}
+
+/* Like add_to_list, but the huge list links by byte offset from the
+ * pool start rather than by page number (0 terminates). */
+static void add_to_huge_list(struct header *head, struct huge_alloc *ha)
+{
+       unsigned long h = head->huge;
+       unsigned long offset = (char *)ha - (char *)head;
+
+       ha->next = h;
+       if (h) {
+               struct huge_alloc *prev = (void *)((char *)head + h);
+               assert(prev->prev == 0);
+               prev->prev = offset;
+       }
+       head->huge = offset;
+       ha->prev = 0;
+}
+
+/* Unlink ha from the huge list. */
+static void del_from_huge(struct header *head, struct huge_alloc *ha)
+{
+       /* Front of list? */
+       if (ha->prev == 0) {
+               head->huge = ha->next;
+       } else {
+               struct huge_alloc *prev = (void *)((char *)head + ha->prev);
+               prev->next = ha->next;
+       }
+       if (ha->next != 0) {
+               struct huge_alloc *next = (void *)((char *)head + ha->next);
+               next->prev = ha->prev;
+       }
+}
+
+/* Thin wrappers naming which list a page is moved on or off. */
+static void add_small_page_to_freelist(struct header *head,
+                                      struct page_header *ph,
+                                      unsigned int sp_bits)
+{
+       add_to_list(head, &head->small_free_list, ph, sp_bits);
+}
+
+static void add_large_page_to_freelist(struct header *head,
+                                      struct page_header *ph,
+                                      unsigned int sp_bits)
+{
+       add_to_list(head, &head->large_free_list, ph, sp_bits);
+}
+
+static void add_to_bucket_list(struct header *head,
+                              struct bucket_state *bs,
+                              struct page_header *ph,
+                              unsigned int sp_bits)
+{
+       add_to_list(head, &bs->page_list, ph, sp_bits);
+}
+
+static void del_from_bucket_list(struct header *head,
+                                struct bucket_state *bs,
+                                struct page_header *ph,
+                                unsigned int sp_bits)
+{
+       del_from_list(head, &bs->page_list, ph, sp_bits);
+}
+
+static void del_from_bucket_full_list(struct header *head,
+                                     struct bucket_state *bs,
+                                     struct page_header *ph,
+                                     unsigned int sp_bits)
+{
+       del_from_list(head, &bs->full_list, ph, sp_bits);
+}
+
+static void add_to_bucket_full_list(struct header *head,
+                                   struct bucket_state *bs,
+                                   struct page_header *ph,
+                                   unsigned int sp_bits)
+{
+       add_to_list(head, &bs->full_list, ph, sp_bits);
+}
+
+/* Single-bit operations on a long-array bitmap. */
+static void clear_bit(unsigned long bitmap[], unsigned int off)
+{
+       bitmap[off / BITS_PER_LONG] &= ~(1UL << (off % BITS_PER_LONG));
+}
+
+static bool test_bit(const unsigned long bitmap[], unsigned int off)
+{
+       return bitmap[off / BITS_PER_LONG] & (1UL << (off % BITS_PER_LONG));
+}
+
+static void set_bit(unsigned long bitmap[], unsigned int off)
+{
+       bitmap[off / BITS_PER_LONG] |= (1UL << (off % BITS_PER_LONG));
+}
+
+/* There must be a bit to be found. */
+static unsigned int find_free_bit(const unsigned long bitmap[])
+{
+       unsigned int i;
+
+       for (i = 0; bitmap[i] == -1UL; i++);
+       /* affsl() is find-first-set from bitops.h (1-based). */
+       return (i*BITS_PER_LONG) + affsl(~bitmap[i]) - 1;
+}
+
+/* How many elements can we fit in a page? */
+static unsigned long elements_per_page(unsigned long align_bits,
+                                      unsigned long esize,
+                                      unsigned long psize)
+{
+       unsigned long num, overhead;
+
+       /* First approximation: no extra room for bitmap. */
+       overhead = align_up(sizeof(struct page_header), 1UL << align_bits);
+       num = (psize - overhead) / esize;
+
+       /* Shrink until header (incl. bitmap) plus elements fit. */
+       while (page_header_size(align_bits, num) + esize * num > psize)
+               num--;
+       return num;
+}
+
+/* Do allocations from this bucket live on large pages? */
+static bool large_page_bucket(unsigned int bucket, unsigned int sp_bits)
+{
+       unsigned long max_smallsize;
+
+       /* Note: this doesn't take into account page header. */
+       max_smallsize = (1UL << sp_bits) >> MAX_PAGE_OBJECT_ORDER;
+
+       return bucket_to_size(bucket) > max_smallsize;
+}
+
+/* Number of buckets servable from pages of 1 << lp_bits bytes, keeping
+ * at least 2^MAX_PAGE_OBJECT_ORDER objects per page. */
+static unsigned int max_bucket(unsigned int lp_bits)
+{
+       return (lp_bits - MAX_PAGE_OBJECT_ORDER) * INTER_BUCKET_SPACE;
+}
+
+/* Initialize a pool of poolsize bytes for use with alloc_get/alloc_free.
+ * Pools below MIN_USEFUL_SIZE are handed to the tiny allocator instead. */
+void alloc_init(void *pool, unsigned long poolsize)
+{
+       struct header *head = pool;
+       struct page_header *ph;
+       unsigned int lp_bits, sp_bits, num_buckets;
+       unsigned long header_size, i;
+
+       if (poolsize < MIN_USEFUL_SIZE) {
+               tiny_alloc_init(pool, poolsize);
+               return;
+       }
+
+       /* We rely on page numbers fitting in 16 bit. */
+       BUILD_ASSERT(MAX_SMALL_PAGES < 65536);
+
+       sp_bits = small_page_bits(poolsize);
+       lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
+
+       num_buckets = max_bucket(lp_bits);
+
+       head = pool;    /* NOTE(review): redundant; head == pool already. */
+       header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);
+
+       memset(head, 0, header_size);
+       for (i = 0; i < num_buckets; i++) {
+               unsigned long pagesize;
+
+               if (large_page_bucket(i, sp_bits))
+                       pagesize = 1UL << lp_bits;
+               else
+                       pagesize = 1UL << sp_bits;
+
+               head->bs[i].elements_per_page
+                       = elements_per_page(i / INTER_BUCKET_SPACE,
+                                           bucket_to_size(i),
+                                           pagesize);
+       }
+
+       /* They start as all large pages. */
+       memset(head->pagesize, 0xFF, sizeof(head->pagesize));
+       /* FIXME: small pages for last bit? */
+
+       /* Split first page into small pages. */
+       assert(header_size < (1UL << lp_bits));
+       clear_bit(head->pagesize, 0);
+
+       /* Skip over page(s) used by header, add rest to free list */
+       for (i = align_up(header_size, (1UL << sp_bits)) >> sp_bits;
+            i < SMALL_PAGES_PER_LARGE_PAGE;
+            i++) {
+               ph = from_pgnum(head, i, sp_bits);
+               ph->elements_used = 0;
+               add_small_page_to_freelist(head, ph, sp_bits);
+       }
+
+       /* Add the rest of the pages as large pages. */
+       i = SMALL_PAGES_PER_LARGE_PAGE;
+       while ((i << sp_bits) + (1UL << lp_bits) <= poolsize) {
+               assert(i < MAX_SMALL_PAGES);
+               ph = from_pgnum(head, i, sp_bits);
+               ph->elements_used = 0;
+               add_large_page_to_freelist(head, ph, sp_bits);
+               i += SMALL_PAGES_PER_LARGE_PAGE;
+       }
+}
+
+/* A large page worth of small pages are free: delete them from free list. */
+static void del_large_from_small_free_list(struct header *head,
+                                          struct page_header *ph,
+                                          unsigned int sp_bits)
+{
+       unsigned long i;
+
+       for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
+               del_from_list(head, &head->small_free_list,
+                             (struct page_header *)((char *)ph
+                                                    + (i << sp_bits)),
+                             sp_bits);
+       }
+}
+
+/* Are all small pages in the large page starting at pgnum unused? */
+static bool all_empty(struct header *head,
+                     unsigned long pgnum,
+                     unsigned sp_bits)
+{
+       unsigned long i;
+
+       for (i = 0; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
+               struct page_header *ph = from_pgnum(head, pgnum + i, sp_bits);
+               if (ph->elements_used)
+                       return false;
+       }
+       return true;
+}
+
+/* Coalesce runs of free small pages back into large pages. */
+static void recombine_small_pages(struct header *head, unsigned long poolsize,
+                                 unsigned int sp_bits)
+{
+       unsigned long i;
+       unsigned int lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
+
+       /* Look for small pages to coalesce, after first large page. */
+       for (i = SMALL_PAGES_PER_LARGE_PAGE;
+            i < (poolsize >> lp_bits) << BITS_FROM_SMALL_TO_LARGE_PAGE;
+            i += SMALL_PAGES_PER_LARGE_PAGE) {
+               /* Already a large page? */
+               if (test_bit(head->pagesize, i / SMALL_PAGES_PER_LARGE_PAGE))
+                       continue;
+               if (all_empty(head, i, sp_bits)) {
+                       struct page_header *ph = from_pgnum(head, i, sp_bits);
+                       set_bit(head->pagesize,
+                               i / SMALL_PAGES_PER_LARGE_PAGE);
+                       del_large_from_small_free_list(head, ph, sp_bits);
+                       add_large_page_to_freelist(head, ph, sp_bits);
+               }
+       }
+}
+
+/* Take a large page off the free list, recombining small pages if the
+ * list is empty.  Returns 0 on failure. */
+static u16 get_large_page(struct header *head, unsigned long poolsize,
+                         unsigned int sp_bits)
+{
+       unsigned int page;
+
+       page = pop_from_list(head, &head->large_free_list, sp_bits);
+       if (likely(page))
+               return page;
+
+       recombine_small_pages(head, poolsize, sp_bits);
+
+       return pop_from_list(head, &head->large_free_list, sp_bits);
+}
+
+/* Returns small page. */
+static unsigned long break_up_large_page(struct header *head,
+                                        unsigned int sp_bits,
+                                        u16 lpage)
+{
+       unsigned int i;
+
+       clear_bit(head->pagesize, lpage >> BITS_FROM_SMALL_TO_LARGE_PAGE);
+
+       /* Keep the first small page for the caller; free the rest. */
+       for (i = 1; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
+               struct page_header *ph = from_pgnum(head, lpage + i, sp_bits);
+               /* Initialize this: huge_alloc reads it. */
+               ph->elements_used = 0;
+               add_small_page_to_freelist(head, ph, sp_bits);
+       }
+
+       return lpage;
+}
+
+/* Take a small page, breaking up a large page if none are free. */
+static u16 get_small_page(struct header *head, unsigned long poolsize,
+                         unsigned int sp_bits)
+{
+       u16 ret;
+
+       ret = pop_from_list(head, &head->small_free_list, sp_bits);
+       if (likely(ret))
+               return ret;
+       ret = get_large_page(head, poolsize, sp_bits);
+       if (likely(ret))
+               ret = break_up_large_page(head, sp_bits, ret);
+       return ret;
+}
+
+/* Does this pool offset fall inside any recorded huge allocation? */
+static bool huge_allocated(struct header *head, unsigned long offset)
+{
+       unsigned long i;
+       struct huge_alloc *ha;
+
+       for (i = head->huge; i; i = ha->next) {
+               ha = (void *)((char *)head + i);
+               if (ha->off <= offset && ha->off + ha->len > offset)
+                       return true;
+       }
+       return false;
+}
+
+/* They want something really big.  Aim for contiguous pages (slow). */
+static COLD void *huge_alloc(void *pool, unsigned long poolsize,
+                            unsigned long size, unsigned long align)
+{
+       struct header *head = pool;
+       struct huge_alloc *ha;
+       unsigned long i, sp_bits, lp_bits, num, header_size;
+
+       sp_bits = small_page_bits(poolsize);
+       lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
+
+       /* Allocate tracking structure optimistically. */
+       ha = alloc_get(pool, poolsize, sizeof(*ha), ALIGNOF(*ha));
+       if (!ha)
+               return NULL;
+
+       /* First search for contiguous small pages... */
+       header_size = sizeof(*head) + sizeof(head->bs) * (max_bucket(lp_bits)-1);
+
+       num = 0;
+       for (i = (header_size + (1UL << sp_bits) - 1) >> sp_bits;
+            i << sp_bits < poolsize;
+            i++) {
+               struct page_header *pg;
+               unsigned long off = (i << sp_bits);
+
+               /* Skip over large pages. */
+               if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
+                       i += (1UL << BITS_FROM_SMALL_TO_LARGE_PAGE)-1;
+                       continue;
+               }
+
+               /* Does this page meet alignment requirements? */
+               if (!num && off % align != 0)
+                       continue;
+
+               /* FIXME: This makes us O(n^2). */
+               if (huge_allocated(head, off)) {
+                       num = 0;
+                       continue;
+               }
+
+               pg = (struct page_header *)((char *)head + off);
+               if (pg->elements_used) {
+                       num = 0;
+                       continue;
+               }
+
+               num++;
+               if (num << sp_bits >= size) {
+                       unsigned long pgnum;
+
+                       /* Remove from free list. */
+                       for (pgnum = i; pgnum > i - num; pgnum--) {
+                               pg = from_pgnum(head, pgnum, sp_bits);
+                               del_from_list(head,
+                                             &head->small_free_list,
+                                             pg, sp_bits);
+                       }
+                       ha->off = (i - num + 1) << sp_bits;
+                       ha->len = num << sp_bits;
+                       goto done;
+               }
+       }
+
+       /* Now search for large pages... */
+       recombine_small_pages(head, poolsize, sp_bits);
+
+       num = 0;
+       for (i = (header_size + (1UL << lp_bits) - 1) >> lp_bits;
+            (i << lp_bits) < poolsize; i++) {
+               struct page_header *pg;
+               unsigned long off = (i << lp_bits);
+
+               /* Ignore small pages. */
+               if (!test_bit(head->pagesize, i))
+                       continue;
+
+               /* Does this page meet alignment requirements? */
+               if (!num && off % align != 0)
+                       continue;
+
+               /* FIXME: This makes us O(n^2). */
+               if (huge_allocated(head, off)) {
+                       num = 0;
+                       continue;
+               }
+
+               pg = (struct page_header *)((char *)head + off);
+               if (pg->elements_used) {
+                       num = 0;
+                       continue;
+               }
+
+               num++;
+               if (num << lp_bits >= size) {
+                       unsigned long pgnum;
+
+                       /* Remove from free list. */
+                       for (pgnum = i; pgnum > i - num; pgnum--) {
+                               pg = from_pgnum(head, pgnum, lp_bits);
+                               del_from_list(head,
+                                             &head->large_free_list,
+                                             pg, sp_bits);
+                       }
+                       ha->off = (i - num + 1) << lp_bits;
+                       ha->len = num << lp_bits;
+                       goto done;
+               }
+       }
+
+       /* Unable to satisfy: free huge alloc structure. */
+       alloc_free(pool, poolsize, ha);
+       return NULL;
+
+done:
+       add_to_huge_list(pool, ha);
+       return (char *)pool + ha->off;
+}
+
+/* Release a huge allocation: return its pages to the appropriate free
+ * list, then unlink and free the tracking structure. */
+static COLD void
+huge_free(struct header *head, unsigned long poolsize, void *free)
+{
+       unsigned long i, off, pgnum, free_off = (char *)free - (char *)head;
+       unsigned int sp_bits, lp_bits;
+       struct huge_alloc *ha;
+
+       /* Find the tracking record for this offset; must exist. */
+       for (i = head->huge; i; i = ha->next) {
+               ha = (void *)((char *)head + i);
+               if (free_off == ha->off)
+                       break;
+       }
+       assert(i);
+
+       /* Free up all the pages, delete and free ha */
+       sp_bits = small_page_bits(poolsize);
+       lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
+       pgnum = free_off >> sp_bits;
+
+       if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
+               for (off = ha->off;
+                    off < ha->off + ha->len;
+                    off += 1UL << lp_bits) {
+                       add_large_page_to_freelist(head,
+                                                  (void *)((char *)head + off),
+                                                  sp_bits);
+               }
+       } else {
+               for (off = ha->off;
+                    off < ha->off + ha->len;
+                    off += 1UL << sp_bits) {
+                       add_small_page_to_freelist(head,
+                                                  (void *)((char *)head + off),
+                                                  sp_bits);
+               }
+       }
+       del_from_huge(head, ha);
+       alloc_free(head, poolsize, ha);
+}
+
+/* Length of the huge allocation starting exactly at p; aborts if p is
+ * not a recorded huge allocation. */
+static COLD unsigned long huge_size(struct header *head, void *p)
+{
+       unsigned long i, off = (char *)p - (char *)head;
+       struct huge_alloc *ha;
+
+       for (i = head->huge; i; i = ha->next) {
+               ha = (void *)((char *)head + i);
+               if (off == ha->off) {
+                       return ha->len;
+               }
+       }
+       abort();
+}
+
+/* Allocate size bytes from the pool, aligned to align.  Returns NULL on
+ * failure.  Pools below MIN_USEFUL_SIZE delegate to the tiny allocator;
+ * requests too big for any bucket fall back to whole-page huge_alloc. */
+void *alloc_get(void *pool, unsigned long poolsize,
+               unsigned long size, unsigned long align)
+{
+       struct header *head = pool;
+       unsigned int bucket;
+       unsigned long i;
+       struct bucket_state *bs;
+       struct page_header *ph;
+       unsigned int sp_bits;
+
+       if (poolsize < MIN_USEFUL_SIZE) {
+               return tiny_alloc_get(pool, poolsize, size, align);
+       }
+
+       /* Zero-sized allocations still get a distinct element. */
+       size = align_up(size, align);
+       if (unlikely(!size))
+               size = 1;
+       bucket = size_to_bucket(size);
+
+       sp_bits = small_page_bits(poolsize);
+
+       if (bucket >= max_bucket(sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE)) {
+               return huge_alloc(pool, poolsize, size, align);
+       }
+
+       bs = &head->bs[bucket];
+
+       /* No page with free slots?  Grab and initialize a fresh one.
+        * (A shadowed local "ph" was removed here; the outer one is
+        * unconditionally reassigned below, so behavior is unchanged.) */
+       if (!bs->page_list) {
+               if (large_page_bucket(bucket, sp_bits))
+                       bs->page_list = get_large_page(head, poolsize,
+                                                      sp_bits);
+               else
+                       bs->page_list = get_small_page(head, poolsize,
+                                                      sp_bits);
+               /* FIXME: Try large-aligned alloc?  Header stuffing? */
+               if (unlikely(!bs->page_list))
+                       return NULL;
+               ph = from_pgnum(head, bs->page_list, sp_bits);
+               ph->bucket = bucket;
+               ph->elements_used = 0;
+               ph->next = 0;
+               memset(ph->used, 0, used_size(bs->elements_per_page));
+       }
+
+       ph = from_pgnum(head, bs->page_list, sp_bits);
+
+       /* Claim a free element in this page. */
+       i = find_free_bit(ph->used);
+       set_bit(ph->used, i);
+       ph->elements_used++;
+
+       /* check if this page is now full */
+       if (unlikely(ph->elements_used == bs->elements_per_page)) {
+               del_from_bucket_list(head, bs, ph, sp_bits);
+               add_to_bucket_full_list(head, bs, ph, sp_bits);
+       }
+
+       return (char *)ph + page_header_size(ph->bucket / INTER_BUCKET_SPACE,
+                                            bs->elements_per_page)
+              + i * bucket_to_size(bucket);
+}
+
+/* Return an allocation obtained from alloc_get to the pool. */
+void alloc_free(void *pool, unsigned long poolsize, void *free)
+{
+       struct header *head = pool;
+       struct bucket_state *bs;
+       unsigned int sp_bits;
+       unsigned long i, pgnum, pgoffset, offset = (char *)free - (char *)pool;
+       bool smallpage;
+       struct page_header *ph;
+
+       if (poolsize < MIN_USEFUL_SIZE) {
+               tiny_alloc_free(pool, poolsize, free);
+               return;
+       }
+
+       /* Get page header. */
+       sp_bits = small_page_bits(poolsize);
+       pgnum = offset >> sp_bits;
+
+       /* Big page? Round down further. */
+       if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
+               smallpage = false;
+               pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);
+       } else
+               smallpage = true;
+
+       /* Step back to page header. */
+       ph = from_pgnum(head, pgnum, sp_bits);
+       /* Only huge allocations start exactly at a page header; bucket
+        * allocations always sit after the (nonzero-sized) header. */
+       if ((void *)ph == free) {
+               huge_free(head, poolsize, free);
+               return;
+       }
+
+       bs = &head->bs[ph->bucket];
+       pgoffset = offset - (pgnum << sp_bits)
+               - page_header_size(ph->bucket / INTER_BUCKET_SPACE,
+                                  bs->elements_per_page);
+
+       /* Page was full: it regains a free slot, move to page_list. */
+       if (unlikely(ph->elements_used == bs->elements_per_page)) {
+               del_from_bucket_full_list(head, bs, ph, sp_bits);
+               add_to_bucket_list(head, bs, ph, sp_bits);
+       }
+
+       /* Which element are we? */
+       i = pgoffset / bucket_to_size(ph->bucket);
+       clear_bit(ph->used, i);
+       ph->elements_used--;
+
+       /* Last element gone: page returns to the free list. */
+       if (unlikely(ph->elements_used == 0)) {
+               bs = &head->bs[ph->bucket];
+               del_from_bucket_list(head, bs, ph, sp_bits);
+               if (smallpage)
+                       add_small_page_to_freelist(head, ph, sp_bits);
+               else
+                       add_large_page_to_freelist(head, ph, sp_bits);
+       }
+}
+
+/* Actual usable size of the allocation at p (may exceed the size
+ * requested from alloc_get, due to bucket rounding). */
+unsigned long alloc_size(void *pool, unsigned long poolsize, void *p)
+{
+       struct header *head = pool;
+       unsigned int pgnum, sp_bits;
+       unsigned long offset = (char *)p - (char *)pool;
+       struct page_header *ph;
+
+       if (poolsize < MIN_USEFUL_SIZE)
+               return tiny_alloc_size(pool, poolsize, p);
+
+       /* Get page header. */
+       sp_bits = small_page_bits(poolsize);
+       pgnum = offset >> sp_bits;
+
+       /* Big page? Round down further. */
+       if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE))
+               pgnum &= ~(SMALL_PAGES_PER_LARGE_PAGE - 1);
+
+       /* Step back to page header. */
+       ph = from_pgnum(head, pgnum, sp_bits);
+       /* Only huge allocations start exactly at a page header. */
+       if ((void *)ph == p)
+               return huge_size(head, p);
+
+       return bucket_to_size(ph->bucket);
+}
+
+/* Useful for gdb breakpoints. */
+static bool check_fail(void)
+{
+       return false;
+}
+
+/* Count set bits in the first limit bits of bitmap. */
+static unsigned long count_bits(const unsigned long bitmap[],
+                               unsigned long limit)
+{
+       unsigned long i, count = 0;
+
+       while (limit >= BITS_PER_LONG) {
+               count += popcount(bitmap[0]);
+               bitmap++;
+               limit -= BITS_PER_LONG;
+       }
+
+       for (i = 0; i < limit; i++)
+               if (test_bit(bitmap, i))
+                       count++;
+       return count;
+}
+
+/* Would a page of pagesize bytes at pgnum overflow or exceed the pool? */
+static bool out_of_bounds(unsigned long pgnum,
+                         unsigned int sp_bits,
+                         unsigned long pagesize,
+                         unsigned long poolsize)
+{
+       /* Shift overflow check. */
+       if (((pgnum << sp_bits) >> sp_bits) != pgnum)
+               return true;
+
+       if ((pgnum << sp_bits) > poolsize)
+               return true;
+
+       return ((pgnum << sp_bits) + pagesize > poolsize);
+}
+
+/* Validate one bucket's sizing and walk its page_list and full_list,
+ * checking list/bitmap consistency.  pages[] records every page seen so
+ * the caller can detect duplicates and leaks.  Returns false (via
+ * check_fail) on any corruption. */
+static bool check_bucket(struct header *head,
+                        unsigned long poolsize,
+                        unsigned long pages[],
+                        struct bucket_state *bs,
+                        unsigned int bindex)
+{
+       bool lp_bucket;
+       struct page_header *ph;
+       unsigned long taken, i, prev, pagesize, sp_bits, lp_bits;
+
+       sp_bits = small_page_bits(poolsize);
+       lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
+
+       lp_bucket = large_page_bucket(bindex, sp_bits);
+
+       pagesize = 1UL << (lp_bucket ? lp_bits : sp_bits);
+
+       /* This many elements fit? */
+       taken = page_header_size(bindex / INTER_BUCKET_SPACE,
+                                bs->elements_per_page);
+       taken += bucket_to_size(bindex) * bs->elements_per_page;
+       if (taken > pagesize)
+               return check_fail();
+
+       /* One more wouldn't fit? */
+       taken = page_header_size(bindex / INTER_BUCKET_SPACE,
+                                bs->elements_per_page + 1);
+       taken += bucket_to_size(bindex) * (bs->elements_per_page + 1);
+       if (taken <= pagesize)
+               return check_fail();
+
+       /* Walk used list. */
+       prev = 0;
+       for (i = bs->page_list; i; i = ph->next) {
+               /* Bad pointer? */
+               if (out_of_bounds(i, sp_bits, pagesize, poolsize))
+                       return check_fail();
+               /* Wrong size page? */
+               if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
+                   != lp_bucket)
+                       return check_fail();
+               /* Large page not on boundary? */
+               if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
+                       return check_fail();
+               ph = from_pgnum(head, i, sp_bits);
+               /* Linked list corrupt? */
+               if (ph->prev != prev)
+                       return check_fail();
+               /* Already seen this page? */
+               if (test_bit(pages, i))
+                       return check_fail();
+               set_bit(pages, i);
+               /* Empty or full? */
+               if (ph->elements_used == 0)
+                       return check_fail();
+               if (ph->elements_used >= bs->elements_per_page)
+                       return check_fail();
+               /* Used bits don't agree? */
+               if (ph->elements_used != count_bits(ph->used,
+                                                   bs->elements_per_page))
+                       return check_fail();
+               /* Wrong bucket? */
+               if (ph->bucket != bindex)
+                       return check_fail();
+               prev = i;
+       }
+
+       /* Walk full list. */
+       prev = 0;
+       for (i = bs->full_list; i; i = ph->next) {
+               /* Bad pointer? */
+               if (out_of_bounds(i, sp_bits, pagesize, poolsize))
+                       return check_fail();
+               /* Wrong size page? */
+               if (!!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)
+                   != lp_bucket)
+                       return check_fail();
+               /* BUGFIX: the return above was missing, so the braceless
+                * if swallowed the boundary check below as its body
+                * (compare the used-list walk above). */
+               /* Large page not on boundary? */
+               if (lp_bucket && (i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
+                       return check_fail();
+               ph = from_pgnum(head, i, sp_bits);
+               /* Linked list corrupt? */
+               if (ph->prev != prev)
+                       return check_fail();
+               /* Already seen this page? */
+               if (test_bit(pages, i))
+                       return check_fail();
+               set_bit(pages, i);
+               /* Not full? */
+               if (ph->elements_used != bs->elements_per_page)
+                       return check_fail();
+               /* Used bits don't agree? */
+               if (ph->elements_used != count_bits(ph->used,
+                                                   bs->elements_per_page))
+                       return check_fail();
+               /* Wrong bucket? */
+               if (ph->bucket != bindex)
+                       return check_fail();
+               prev = i;
+       }
+       return true;
+}
+
+/* Exhaustive consistency check of the pool: walks the small/large free
+ * lists, every bucket, and the huge-alloc list, marking each small page
+ * in a local bitmap; finally verifies every page was seen exactly once.
+ * Returns false (via check_fail(), a breakpoint hook) on the first
+ * inconsistency found. */
+bool alloc_check(void *pool, unsigned long poolsize)
+{
+       struct header *head = pool;
+       unsigned long prev, i, lp_bits, sp_bits, header_size, num_buckets;
+       struct page_header *ph;
+       struct huge_alloc *ha;
+       unsigned long pages[MAX_SMALL_PAGES / BITS_PER_LONG] = { 0 };
+
+       /* Small pools use the tiny allocator; delegate the check. */
+       if (poolsize < MIN_USEFUL_SIZE)
+               return tiny_alloc_check(pool, poolsize);
+
+       sp_bits = small_page_bits(poolsize);
+       lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
+
+       num_buckets = max_bucket(lp_bits);
+
+       header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);
+
+       /* First, set all bits taken by header. */
+       for (i = 0; i < header_size; i += (1UL << sp_bits))
+               set_bit(pages, i >> sp_bits);
+
+       /* Check small page free list. */
+       prev = 0;
+       for (i = head->small_free_list; i; i = ph->next) {
+               /* Bad pointer? */
+               if (out_of_bounds(i, sp_bits, 1UL << sp_bits, poolsize))
+                       return check_fail();
+               /* Large page? */
+               if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
+                       return check_fail();
+               ph = from_pgnum(head, i, sp_bits);
+               /* Linked list corrupt? */
+               if (ph->prev != prev)
+                       return check_fail();
+               /* Already seen this page? */
+               if (test_bit(pages, i))
+                       return check_fail();
+               set_bit(pages, i);
+               prev = i;
+       }
+
+       /* Check large page free list. */
+       prev = 0;
+       for (i = head->large_free_list; i; i = ph->next) {
+               /* Bad pointer? */
+               if (out_of_bounds(i, sp_bits, 1UL << lp_bits, poolsize))
+                       return check_fail();
+               /* Not large page? */
+               if (!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
+                       return check_fail();
+               /* Not page boundary? */
+               if ((i % SMALL_PAGES_PER_LARGE_PAGE) != 0)
+                       return check_fail();
+               ph = from_pgnum(head, i, sp_bits);
+               /* Linked list corrupt? */
+               if (ph->prev != prev)
+                       return check_fail();
+               /* Already seen this page? */
+               if (test_bit(pages, i))
+                       return check_fail();
+               set_bit(pages, i);
+               prev = i;
+       }
+
+       /* Check the buckets. */
+       for (i = 0; i < max_bucket(lp_bits); i++) {
+               struct bucket_state *bs = &head->bs[i];
+
+               if (!check_bucket(head, poolsize, pages, bs, i))
+                       return false;
+       }
+
+       /* Check the huge alloc list. */
+       prev = 0;
+       for (i = head->huge; i; i = ha->next) {
+               unsigned long pgbits, j;
+
+               /* Bad pointer? */
+               if (i >= poolsize || i + sizeof(*ha) > poolsize)
+                       return check_fail();
+               ha = (void *)((char *)head + i);
+
+               /* Check contents of ha. */
+               if (ha->off > poolsize || ha->off + ha->len > poolsize)
+                       return check_fail();
+
+               /* Large or small page? */
+               pgbits = test_bit(head->pagesize, ha->off >> lp_bits)
+                       ? lp_bits : sp_bits;
+
+               /* Not page boundary? */
+               if ((ha->off % (1UL << pgbits)) != 0)
+                       return check_fail();
+
+               /* Not page length? */
+               if ((ha->len % (1UL << pgbits)) != 0)
+                       return check_fail();
+
+               /* Linked list corrupt? */
+               if (ha->prev != prev)
+                       return check_fail();
+
+               for (j = ha->off; j < ha->off + ha->len; j += (1UL<<sp_bits)) {
+                       /* Already seen this page? */
+                       if (test_bit(pages, j >> sp_bits))
+                               return check_fail();
+                       set_bit(pages, j >> sp_bits);
+               }
+
+               prev = i;
+       }
+
+       /* Make sure every page accounted for. */
+       for (i = 0; i < poolsize >> sp_bits; i++) {
+               if (!test_bit(pages, i))
+                       return check_fail();
+               if (test_bit(head->pagesize,
+                            i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
+                       /* Large page, skip rest. */
+                       i += SMALL_PAGES_PER_LARGE_PAGE - 1;
+               }
+       }
+
+       return true;
+}
+
+/* Print one "Overhead" line (bytes and % of pool) and return the byte
+ * count so callers can accumulate a running total. */
+static unsigned long print_overhead(FILE *out, const char *desc,
+                                   unsigned long bytes,
+                                   unsigned long poolsize)
+{
+       fprintf(out, "Overhead (%s): %lu bytes (%.3g%%)\n",
+               desc, bytes, 100.0 * bytes / poolsize);
+       return bytes;
+}
+
+/* Count the pages in the linked list starting at @pgnum.  If @total_elems
+ * is non-NULL, also accumulate each page's elements_used into it. */
+static unsigned long count_list(struct header *head,
+                               u16 pgnum,
+                               unsigned int sp_bits,
+                               unsigned long *total_elems)
+{
+       struct page_header *p;
+       unsigned long ret = 0;
+
+       while (pgnum) {
+               p = from_pgnum(head, pgnum, sp_bits);
+               if (total_elems)
+                       (*total_elems) += p->elements_used;
+               ret++;
+               pgnum = p->next;
+       }
+       return ret;
+}
+
+/* Print usage statistics for one bucket and return its bookkeeping
+ * overhead in bytes (0 if the bucket currently has no pages). */
+static unsigned long visualize_bucket(FILE *out, struct header *head,
+                                     unsigned int bucket,
+                                     unsigned long poolsize,
+                                     unsigned int sp_bits)
+{
+       unsigned long num_full, num_partial, num_pages, page_size,
+               elems, hdr_min, hdr_size, elems_per_page, overhead = 0;
+
+       elems_per_page = head->bs[bucket].elements_per_page;
+
+       /* If we used byte-based bitmaps, we could get pg hdr to: */
+       hdr_min = sizeof(struct page_header)
+               - sizeof(((struct page_header *)0)->used)
+               + align_up(elems_per_page, CHAR_BIT) / CHAR_BIT;
+       hdr_size = page_header_size(bucket / INTER_BUCKET_SPACE,
+                                   elems_per_page);
+
+       elems = 0;
+       num_full = count_list(head, head->bs[bucket].full_list, sp_bits,
+                             &elems);
+       num_partial = count_list(head, head->bs[bucket].page_list, sp_bits,
+                                &elems);
+       num_pages = num_full + num_partial;
+       if (!num_pages)
+               return 0;
+
+       fprintf(out, "Bucket %u (%lu bytes):"
+               " %lu full, %lu partial = %lu elements\n",
+               bucket, bucket_to_size(bucket), num_full, num_partial, elems);
+       /* Strict requirement of page header size. */
+       overhead += print_overhead(out, "page headers",
+                                  hdr_min * num_pages, poolsize);
+       /* Gap between minimal page header and actual start. */
+       overhead += print_overhead(out, "page post-header alignments",
+                                  (hdr_size - hdr_min) * num_pages, poolsize);
+       /* Between last element and end of page. */
+       page_size = (1UL << sp_bits);
+       if (large_page_bucket(bucket, sp_bits))
+               page_size <<= BITS_FROM_SMALL_TO_LARGE_PAGE;
+
+       overhead += print_overhead(out, "page tails",
+                                  (page_size - (hdr_size
+                                                + (elems_per_page
+                                                   * bucket_to_size(bucket))))
+                                  * num_pages, poolsize);
+       return overhead;
+}
+
+/* Dump a human-readable summary of the pool layout, page usage and
+ * bookkeeping overheads to @out (assumes the pool is not corrupt). */
+void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
+{
+       struct header *head = pool;
+       unsigned long i, lp_bits, sp_bits, header_size, num_buckets, count,
+               overhead = 0;
+
+       fprintf(out, "Pool %p size %lu: (%s allocator)\n", pool, poolsize,
+               poolsize < MIN_USEFUL_SIZE ? "tiny" : "standard");
+
+       /* Small pools are handled entirely by the tiny allocator. */
+       if (poolsize < MIN_USEFUL_SIZE) {
+               tiny_alloc_visualize(out, pool, poolsize);
+               return;
+       }
+
+       sp_bits = small_page_bits(poolsize);
+       lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
+
+       num_buckets = max_bucket(lp_bits);
+       header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);
+
+       fprintf(out, "Large page size %lu, small page size %lu.\n",
+               1UL << lp_bits, 1UL << sp_bits);
+       overhead += print_overhead(out, "unused pool tail",
+                                  poolsize % (1UL << lp_bits), poolsize);
+       fprintf(out, "Main header %lu bytes (%lu small pages).\n",
+               header_size, align_up(header_size, 1UL << sp_bits) >> sp_bits);
+       overhead += print_overhead(out, "partial header page",
+                                  align_up(header_size, 1UL << sp_bits)
+                                  - header_size, poolsize);
+       /* Total large pages. */
+       i = count_bits(head->pagesize, poolsize >> lp_bits);
+       /* Used pages. */
+       count = i - count_list(head, head->large_free_list, sp_bits, NULL);
+       fprintf(out, "%lu/%lu large pages used (%.3g%%)\n",
+               count, i, count ? 100.0 * count / i : 0.0);
+
+       /* Total small pages. */
+       i = ((poolsize >> lp_bits) - i) << BITS_FROM_SMALL_TO_LARGE_PAGE;
+       /* Used pages */
+       count = i - count_list(head, head->small_free_list, sp_bits, NULL);
+       fprintf(out, "%lu/%lu small pages used (%.3g%%)\n",
+               count, i, count ? 100.0 * count / i : 0.0);
+
+       /* Summary of each bucket. */
+       fprintf(out, "%lu buckets:\n", num_buckets);
+       for (i = 0; i < num_buckets; i++)
+               overhead += visualize_bucket(out, head, i, poolsize, sp_bits);
+
+       print_overhead(out, "total", overhead, poolsize);
+}
diff --git a/ccan/antithread/alloc/alloc.h b/ccan/antithread/alloc/alloc.h
new file mode 100644 (file)
index 0000000..8ffa169
--- /dev/null
@@ -0,0 +1,130 @@
+/* Licensed under LGPLv2.1+ - see LICENSE file for details */
+#ifndef ALLOC_H
+#define ALLOC_H
+#include <stdio.h>
+#include <stdbool.h>
+
+/**
+ * alloc_init - initialize a pool of memory for the allocator.
+ * @pool: the contiguous bytes for the allocator to use
+ * @poolsize: the size of the pool
+ *
+ * This stores all the setup state required to perform allocation within the
+ * pool (there is no external state).  Any previous contents of @pool is
+ * discarded.
+ *
+ * The same @pool and @poolsize arguments must be handed to the other alloc
+ * functions after this.
+ *
+ * If the pool is too small for meaningful allocations, alloc_get will fail.
+ *
+ * Example:
+ *     void *pool = malloc(32*1024*1024);
+ *     if (!pool)
+ *             err(1, "Failed to allocate 32MB");
+ *     alloc_init(pool, 32*1024*1024);
+ */
+void alloc_init(void *pool, unsigned long poolsize);
+
+/**
+ * alloc_get - allocate some memory from the pool
+ * @pool: the contiguous bytes for the allocator to use
+ * @poolsize: the size of the pool
+ * @size: the size of the desired allocation
+ * @align: the alignment of the desired allocation (0 or power of 2)
+ *
+ * This is "malloc" within an initialized pool.
+ *
+ * It will return a unique pointer within the pool (ie. between @pool
+ * and @pool+@poolsize) which meets the alignment requirements of
+ * @align.  Note that the alignment is relative to the start of the pool,
+ * so of @pool is not aligned, the pointer won't be either.
+ *
+ * Returns NULL if there is no contiguous room.
+ *
+ * Example:
+ *     #include <ccan/alignof/alignof.h>
+ *     ...
+ *             double *d = alloc_get(pool, 32*1024*1024,
+ *                                   sizeof(*d), ALIGNOF(*d));
+ *             if (!d)
+ *                     err(1, "Failed to allocate a double");
+ */
+void *alloc_get(void *pool, unsigned long poolsize,
+               unsigned long size, unsigned long align);
+
+/**
+ * alloc_free - free some allocated memory from the pool
+ * @pool: the contiguous bytes for the allocator to use
+ * @poolsize: the size of the pool
+ * @p: the non-NULL pointer returned from alloc_get.
+ *
+ * This is "free" within an initialized pool.  A pointer should only be
+ * freed once, and must be a pointer returned from a successful alloc_get()
+ * call.
+ *
+ * Example:
+ *     alloc_free(pool, 32*1024*1024, d);
+ */
+void alloc_free(void *pool, unsigned long poolsize, void *free);
+
+/**
+ * alloc_size - get the actual size allocated by alloc_get
+ * @pool: the contiguous bytes for the allocator to use
+ * @poolsize: the size of the pool
+ * @p: the non-NULL pointer returned from alloc_get.
+ *
+ * alloc_get() may overallocate, in which case you may use the extra
+ * space exactly as if you had asked for it.
+ *
+ * The return value will always be at least the @size passed to alloc_get().
+ *
+ * Example:
+ *     printf("Allocating a double actually got me %lu bytes\n",
+ *             alloc_size(pool, 32*1024*1024, d));
+ */
+unsigned long alloc_size(void *pool, unsigned long poolsize, void *p);
+
+/**
+ * alloc_check - check the integrity of the allocation pool
+ * @pool: the contiguous bytes for the allocator to use
+ * @poolsize: the size of the pool
+ *
+ * alloc_check() can be used for debugging suspected pool corruption.  It may
+ * be quite slow, but provides some assistance for hard-to-find overruns or
+ * double-frees.  Unlike the rest of the code, it will not crash on corrupted
+ * pools.
+ *
+ * There is an internal function check_fail() which this calls on failure which
+ * is useful for placing breakpoints and gaining more insight into the type
+ * of the corruption detected.
+ *
+ * Example:
+ *     #include <assert.h>
+ *     ...
+ *             assert(alloc_check(pool, 32*1024*1024));
+ */
+bool alloc_check(void *pool, unsigned long poolsize);
+
+/**
+ * alloc_visualize - dump information about the allocation pool
+ * @pool: the contiguous bytes for the allocator to use
+ * @poolsize: the size of the pool
+ *
+ * When debugging the allocator itself, it's often useful to see how
+ * the pool is being used.  alloc_visualize() does that, but makes
+ * assumptions about correctness (like the rest of the code) so if you
+ * suspect corruption call alloc_check() first.
+ *
+ * Example:
+ *     d = alloc_get(pool, 32*1024*1024, sizeof(*d), ALIGNOF(*d));
+ *     if (!d) {
+ *             fprintf(stderr, "Allocation failed!\n");
+ *             if (!alloc_check(pool, 32*1024*1024))
+ *                     errx(1, "Allocation pool is corrupt");
+ *             alloc_visualize(stderr, pool, 32*1024*1024);
+ *             exit(1);
+ *     }
+ */
+void alloc_visualize(FILE *out, void *pool, unsigned long poolsize);
+#endif /* ALLOC_H */
diff --git a/ccan/antithread/alloc/bitops.c b/ccan/antithread/alloc/bitops.c
new file mode 100644 (file)
index 0000000..78c2b24
--- /dev/null
@@ -0,0 +1,96 @@
+/* Licensed under LGPLv2.1+ - see LICENSE file for details */
+#include "bitops.h"
+#include "config.h"
+#include <ccan/build_assert/build_assert.h>
+#include <ccan/short_types/short_types.h>
+#include <ccan/ilog/ilog.h>
+#include <limits.h>
+
+/* "Alloc fls": 1-based position of the highest set bit in @val, or 0 if
+ * @val is 0 — delegates to ilog32/ilog64 depending on the width of long. */
+unsigned int afls(unsigned long val)
+{
+       BUILD_ASSERT(sizeof(val) == sizeof(u32) || sizeof(val) == sizeof(u64));
+       if (sizeof(val) == sizeof(u32))
+               return ilog32(val);
+       else
+               return ilog64(val);
+}
+
+/* FIXME: Move to bitops. */
+/* Find-first-set for unsigned long: 1-based index of the least significant
+ * set bit, or 0 when @val == 0.  Binary-search fallback when the compiler
+ * builtin is unavailable. */
+unsigned int affsl(unsigned long val)
+{
+#if HAVE_BUILTIN_FFSL
+       /* This is significantly faster! */
+       return __builtin_ffsl(val);
+#else
+       unsigned int r = 1;
+
+       if (!val)
+               return 0;
+       if (sizeof(long) == sizeof(u64)) {
+               if (!(val & 0xffffffff)) {
+                       /* Workaround gcc warning on 32-bit:
+                          error: right shift count >= width of type */
+                       u64 tmp = val;
+                       tmp >>= 32;
+                       val = tmp;
+                       r += 32;
+               }
+       }
+       if (!(val & 0xffff)) {
+               val >>= 16;
+               r += 16;
+       }
+       if (!(val & 0xff)) {
+               val >>= 8;
+               r += 8;
+       }
+       if (!(val & 0xf)) {
+               val >>= 4;
+               r += 4;
+       }
+       if (!(val & 3)) {
+               val >>= 2;
+               r += 2;
+       }
+       if (!(val & 1)) {
+               val >>= 1;
+               r += 1;
+       }
+       return r;
+#endif
+}
+
+/* Count the set bits in @val.  Without the builtin, uses the classic
+ * SWAR (parallel bit-summing) reduction, 64- or 32-bit as appropriate. */
+unsigned int popcount(unsigned long val)
+{
+#if HAVE_BUILTIN_POPCOUNTL
+       return __builtin_popcountl(val);
+#else
+       if (sizeof(long) == sizeof(u64)) {
+               u64 v = val;
+               v = (v & 0x5555555555555555ULL)
+                       + ((v >> 1) & 0x5555555555555555ULL);
+               v = (v & 0x3333333333333333ULL)
+                       + ((v >> 2) & 0x3333333333333333ULL);
+               v = (v & 0x0F0F0F0F0F0F0F0FULL)
+                       + ((v >> 4) & 0x0F0F0F0F0F0F0F0FULL);
+               v = (v & 0x00FF00FF00FF00FFULL)
+                       + ((v >> 8) & 0x00FF00FF00FF00FFULL);
+               v = (v & 0x0000FFFF0000FFFFULL)
+                       + ((v >> 16) & 0x0000FFFF0000FFFFULL);
+               v = (v & 0x00000000FFFFFFFFULL)
+                       + ((v >> 32) & 0x00000000FFFFFFFFULL);
+               return v;
+       }
+       val = (val & 0x55555555ULL) + ((val >> 1) & 0x55555555ULL);
+       val = (val & 0x33333333ULL) + ((val >> 2) & 0x33333333ULL);
+       val = (val & 0x0F0F0F0FULL) + ((val >> 4) & 0x0F0F0F0FULL);
+       val = (val & 0x00FF00FFULL) + ((val >> 8) & 0x00FF00FFULL);
+       val = (val & 0x0000FFFFULL) + ((val >> 16) & 0x0000FFFFULL);
+       return val;
+#endif
+}
+
+/* Round @x up to the next multiple of @align.  @align must be a power of
+ * two (the mask trick relies on align-1 being a contiguous low-bit mask). */
+unsigned long align_up(unsigned long x, unsigned long align)
+{
+       return (x + align - 1) & ~(align - 1);
+}
diff --git a/ccan/antithread/alloc/bitops.h b/ccan/antithread/alloc/bitops.h
new file mode 100644 (file)
index 0000000..3feeed2
--- /dev/null
@@ -0,0 +1,8 @@
+/* Licensed under LGPLv2.1+ - see LICENSE file for details */
+#ifndef CCAN_ALLOC_BITOPS_H
+#define CCAN_ALLOC_BITOPS_H
+unsigned int afls(unsigned long val);
+unsigned int affsl(unsigned long val);
+unsigned int popcount(unsigned long val);
+unsigned long align_up(unsigned long x, unsigned long align);
+#endif /* CCAN_ALLOC_BITOPS_H */
diff --git a/ccan/antithread/alloc/test/run-corrupt.c b/ccan/antithread/alloc/test/run-corrupt.c
new file mode 100644 (file)
index 0000000..2c332d5
--- /dev/null
@@ -0,0 +1,26 @@
+/* Example allocation which caused corruption. */
+#include <ccan/antithread/alloc/alloc.c>
+#include <ccan/antithread/alloc/bitops.c>
+#include <ccan/antithread/alloc/tiny.c>
+#include <ccan/tap/tap.h>
+#include <stdlib.h>
+
+int main(int argc, char *argv[])
+{
+       void *mem;
+
+       plan_tests(7);
+
+       /* This exact pool size / allocation sequence reproduced the
+        * corruption this regression test exists for.  (malloc result is
+        * unchecked; in a test a NULL would just crash loudly.) */
+       mem = malloc(1179648);
+       alloc_init(mem, 1179648);
+       ok1(alloc_check(mem, 1179648));
+       ok1(alloc_get(mem, 1179648, 48, 16));
+       ok1(alloc_check(mem, 1179648));
+       ok1(alloc_get(mem, 1179648, 53, 16));
+       ok1(alloc_check(mem, 1179648));
+       ok1(alloc_get(mem, 1179648, 53, 16));
+       ok1(alloc_check(mem, 1179648));
+       free(mem);
+
+       return exit_status();
+}
diff --git a/ccan/antithread/alloc/test/run-testsize.c b/ccan/antithread/alloc/test/run-testsize.c
new file mode 100644 (file)
index 0000000..c702a3d
--- /dev/null
@@ -0,0 +1,79 @@
+#include <ccan/antithread/alloc/alloc.h>
+#include <ccan/tap/tap.h>
+#include <ccan/antithread/alloc/alloc.c>
+#include <ccan/antithread/alloc/bitops.c>
+#include <ccan/antithread/alloc/tiny.c>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <err.h>
+
+/* XOR every byte of the region with 0xFF: scribbles over an allocation so
+ * later checks would notice if allocations overlapped. */
+static void invert_bytes(unsigned char *p, unsigned long size)
+{
+       unsigned int i;
+
+       for (i = 0; i < size; i++)
+               p[i] ^= 0xFF;
+}
+
+/* Each live p[i] was requested with size i (see test_pool), so
+ * alloc_size() must report at least i for it. */
+static bool sizes_ok(void *mem, unsigned long poolsize, void *p[], unsigned num)
+{
+       unsigned int i;
+
+       for (i = 0; i < num; i++)
+               if (p[i] && alloc_size(mem, poolsize, p[i]) < i)
+                       return false;
+       return true;
+}
+
+/* Allocate blocks of geometrically growing sizes, scribble on each,
+ * free every second one, and verify alloc_check()/alloc_size() stay
+ * consistent throughout. */
+static void test_pool(unsigned long pool_size)
+{
+       unsigned int i, num;
+       void *mem;
+       void **p;
+       bool flip = false;
+
+       p = calloc(pool_size, sizeof(void *));
+       mem = malloc(pool_size);
+
+       alloc_init(mem, pool_size);
+
+       /* Check that alloc_size() gives reasonable answers. */
+       for (i = 0; i < pool_size; i = i * 3 / 2 + 1) {
+               p[i] = alloc_get(mem, pool_size, i, 1);
+               if (!p[i])
+                       break;
+               invert_bytes(p[i], alloc_size(mem, pool_size, p[i]));
+       }
+       ok1(i < pool_size);
+       num = i;
+       ok1(alloc_check(mem, pool_size));
+       ok1(sizes_ok(mem, pool_size, p, num));
+
+       /* Free every second one. */
+       for (i = 0; i < num; i = i * 3 / 2 + 1) {
+               flip = !flip;
+               if (flip) {
+                       invert_bytes(p[i], alloc_size(mem,pool_size,p[i]));
+                       continue;
+               }
+               alloc_free(mem, pool_size, p[i]);
+               p[i] = NULL;
+       }
+       ok1(alloc_check(mem, pool_size));
+       ok1(sizes_ok(mem, pool_size, p, num));
+       free(p);
+       free(mem);
+}
+
+/* Run the size checks on pools above and below MIN_USEFUL_SIZE, so both
+ * the standard and the tiny allocator paths are exercised. */
+int main(int argc, char *argv[])
+{
+       plan_tests(10);
+
+       /* Large test. */
+       test_pool(MIN_USEFUL_SIZE * 2);
+
+       /* Small test. */
+       test_pool(MIN_USEFUL_SIZE / 2);
+
+       return exit_status();
+}
diff --git a/ccan/antithread/alloc/test/run-tiny-encode.c b/ccan/antithread/alloc/test/run-tiny-encode.c
new file mode 100644 (file)
index 0000000..a76e746
--- /dev/null
@@ -0,0 +1,59 @@
+#include <ccan/tap/tap.h>
+#include "config.h"
+#include <ccan/antithread/alloc/tiny.c>
+#include <ccan/antithread/alloc/bitops.c>
+#include <stdlib.h>
+#include <err.h>
+
+/* Test encoding and decoding. */
+#define ARR_SIZE 10
+
+int main(void)
+{
+       unsigned char array[ARR_SIZE];
+       unsigned int i, prev;
+
+       plan_tests(405);
+
+       prev = 0;
+       /* Test encode_length: header length must be monotonic in the
+        * encoded value.  (Lengths are stored offset by MIN_BLOCK_SIZE,
+        * hence the +MIN_BLOCK_SIZE in the encode() calls below.) */
+       for (i = 1; i < 0x8000000; i *= 2) {
+               ok1(encode_length(i-1) >= prev);
+               ok1(encode_length(i) >= encode_length(i-1));
+               ok1(encode_length(i+1) >= encode_length(i));
+               prev = encode_length(i);
+       }
+
+       /* Test it against actual encoding return val. */
+       for (i = 1; i < 0x8000000; i *= 2) {
+               ok1(encode_length(i-1) == encode(i - 1 + MIN_BLOCK_SIZE,
+                                                false, array));
+               ok1(encode_length(i) == encode(i + MIN_BLOCK_SIZE,
+                                              false, array));
+               ok1(encode_length(i+1) == encode(i + 1 + MIN_BLOCK_SIZE,
+                                                false, array));
+       }
+
+       /* Test encoder vs. decoder: round-trip length and free flag. */
+       for (i = 1; i < 0x8000000; i *= 2) {
+               unsigned long hdrlen, len;
+               bool free;
+
+               hdrlen = encode(i - 1 + MIN_BLOCK_SIZE, false, array);
+               ok1(decode(&len, &free, array) == hdrlen);
+               ok1(len == i - 1 + MIN_BLOCK_SIZE);
+               ok1(free == false);
+
+               hdrlen = encode(i + MIN_BLOCK_SIZE, true, array);
+               ok1(decode(&len, &free, array) == hdrlen);
+               ok1(len == i + MIN_BLOCK_SIZE);
+               ok1(free == true);
+
+               hdrlen = encode(i + 1 + MIN_BLOCK_SIZE, true, array);
+               ok1(decode(&len, &free, array) == hdrlen);
+               ok1(len == i + 1 + MIN_BLOCK_SIZE);
+               ok1(free == true);
+       }
+
+       return exit_status();
+}
diff --git a/ccan/antithread/alloc/test/run.c b/ccan/antithread/alloc/test/run.c
new file mode 100644 (file)
index 0000000..4b2b900
--- /dev/null
@@ -0,0 +1,179 @@
+#include <ccan/antithread/alloc/alloc.h>
+#include <ccan/tap/tap.h>
+#include <ccan/antithread/alloc/alloc.c>
+#include <ccan/antithread/alloc/bitops.c>
+#include <ccan/antithread/alloc/tiny.c>
+#include <stdlib.h>
+#include <err.h>
+
+#define sort(p, num, cmp) \
+       qsort((p), (num), sizeof(*p), (int(*)(const void *, const void *))cmp)
+
+/* qsort comparator ordering pointers by address.  NOTE(review):
+ * subtracting pointers into different objects is technically undefined;
+ * harmless on flat-address-space targets, and this is only test code. */
+static int addr_cmp(void **a, void **b)
+{
+       return (char *)(*a) - (char *)(*b);
+}
+
+/* Requires p[] to be sorted (see sort/addr_cmp): any duplicate pointers
+ * would then be adjacent. */
+static bool unique(void *p[], unsigned int num)
+{
+       unsigned int i;
+
+       for (i = 1; i < num; i++)
+               if (p[i] == p[i-1])
+                       return false;
+       return true;
+}
+
+/* Free even-indexed entries, check pool integrity, then free the
+ * odd-indexed ones and check again.  Returns false on any check failure. */
+static bool free_every_second_one(void *mem, unsigned int num,
+                                 unsigned long pool_size, void *p[])
+{
+       unsigned int i;
+
+       /* Free every second one. */
+       for (i = 0; i < num; i += 2) {
+               alloc_free(mem, pool_size, p[i]);
+       }
+       if (!alloc_check(mem, pool_size))
+               return false;
+       for (i = 1; i < num; i += 2) {
+               alloc_free(mem, pool_size, p[i]);
+       }
+       if (!alloc_check(mem, pool_size))
+               return false;
+       return true;
+}
+
+/* Full allocator exercise for one pool size: zero-size pool behaviour,
+ * largest-fit search, free/re-get reuse, pool exhaustion, uniqueness of
+ * returned pointers, re-init, and power-of-two alignment constraints. */
+static void test(unsigned int pool_size)
+{
+       void *mem;
+       unsigned int i, num, max_size;
+       void **p = calloc(pool_size, sizeof(*p));
+       unsigned alloc_limit = pool_size / 2;
+
+       mem = malloc(pool_size);
+
+       /* Small pool, all allocs fail, even 0-length. */
+       alloc_init(mem, 0);
+       ok1(alloc_check(mem, 0));
+       ok1(alloc_get(mem, 0, 1, 1) == NULL);
+       ok1(alloc_get(mem, 0, 128, 1) == NULL);
+       ok1(alloc_get(mem, 0, 0, 1) == NULL);
+
+       /* Free of NULL should work. */
+       alloc_free(mem, 0, NULL);
+
+       alloc_init(mem, pool_size);
+       ok1(alloc_check(mem, pool_size));
+       /* Find largest allocation which works. */
+       for (max_size = pool_size + 1; max_size; max_size--) {
+               p[0] = alloc_get(mem, pool_size, max_size, 1);
+               if (p[0])
+                       break;
+       }
+       ok1(max_size < pool_size);
+       ok1(max_size > 0);
+       ok1(alloc_check(mem, pool_size));
+       ok1(alloc_size(mem, pool_size, p[0]) >= max_size);
+
+       /* Free it, should be able to reallocate it. */
+       alloc_free(mem, pool_size, p[0]);
+       ok1(alloc_check(mem, pool_size));
+
+       p[0] = alloc_get(mem, pool_size, max_size, 1);
+       ok1(p[0]);
+       ok1(alloc_size(mem, pool_size, p[0]) >= max_size);
+       ok1(alloc_check(mem, pool_size));
+       alloc_free(mem, pool_size, p[0]);
+       ok1(alloc_check(mem, pool_size));
+
+       /* Allocate a whole heap. */
+       for (i = 0; i < pool_size; i++) {
+               p[i] = alloc_get(mem, pool_size, 1, 1);
+               if (!p[i])
+                       break;
+       }
+
+       /* Uncomment this for a more intuitive view of what the
+        * allocator looks like after all these 1 byte allocs. */
+#if 0
+       alloc_visualize(stderr, mem, pool_size);
+#endif
+
+       num = i;
+       /* Can't allocate this many. */
+       ok1(num != pool_size);
+       ok1(alloc_check(mem, pool_size));
+
+       /* Sort them. */
+       sort(p, num, addr_cmp);
+
+       /* Uniqueness check */
+       ok1(unique(p, num));
+
+       ok1(free_every_second_one(mem, num, pool_size, p));
+       ok1(alloc_check(mem, pool_size));
+
+       /* Should be able to reallocate max size. */
+       p[0] = alloc_get(mem, pool_size, max_size, 1);
+       ok1(p[0]);
+       ok1(alloc_check(mem, pool_size));
+       ok1(alloc_size(mem, pool_size, p[0]) >= max_size);
+
+       /* Re-initializing should be the same as freeing everything */
+       alloc_init(mem, pool_size);
+       ok1(alloc_check(mem, pool_size));
+       p[0] = alloc_get(mem, pool_size, max_size, 1);
+       ok1(p[0]);
+       ok1(alloc_size(mem, pool_size, p[0]) >= max_size);
+       ok1(alloc_check(mem, pool_size));
+       alloc_free(mem, pool_size, p[0]);
+       ok1(alloc_check(mem, pool_size));
+
+       /* Alignment constraints should be met, as long as powers of two */
+       for (i = 0; (1 << i) < alloc_limit; i++) {
+               p[i] = alloc_get(mem, pool_size, i, 1 << i);
+               ok1(p[i]);
+               ok1(((char *)p[i] - (char *)mem) % (1 << i) == 0);
+               ok1(alloc_check(mem, pool_size));
+               ok1(alloc_size(mem, pool_size, p[i]) >= i);
+       }
+
+       for (i = 0; (1 << i) < alloc_limit; i++) {
+               alloc_free(mem, pool_size, p[i]);
+               ok1(alloc_check(mem, pool_size));
+       }
+
+       /* Alignment constraints for a single-byte allocation. */
+       for (i = 0; (1 << i) < alloc_limit; i++) {
+               p[0] = alloc_get(mem, pool_size, 1, 1 << i);
+               ok1(p[0]);
+               ok1(((char *)p[0] - (char *)mem) % (1 << i) == 0);
+               ok1(alloc_check(mem, pool_size));
+               ok1(alloc_size(mem, pool_size, p[0]) >= 1);
+               alloc_free(mem, pool_size, p[0]);
+               ok1(alloc_check(mem, pool_size));
+       }
+
+       /* Alignment check for a 0-byte allocation.  Corner case. */
+       p[0] = alloc_get(mem, pool_size, 0, alloc_limit);
+       ok1(alloc_check(mem, pool_size));
+       ok1(alloc_size(mem, pool_size, p[0]) < pool_size);
+       alloc_free(mem, pool_size, p[0]);
+       ok1(alloc_check(mem, pool_size));
+
+       free(mem);
+       free(p);
+}
+
+/* Exercise both the standard (pool >= MIN_USEFUL_SIZE) and the tiny
+ * allocator paths. */
+int main(int argc, char *argv[])
+{
+       plan_tests(440);
+
+       /* Large test. */
+       test(MIN_USEFUL_SIZE * 2);
+
+       /* Small test. */
+       test(MIN_USEFUL_SIZE / 2);
+
+       return exit_status();
+}
diff --git a/ccan/antithread/alloc/tiny.c b/ccan/antithread/alloc/tiny.c
new file mode 100644 (file)
index 0000000..ffd17c6
--- /dev/null
@@ -0,0 +1,431 @@
+/* Licensed under LGPLv2.1+ - see LICENSE file for details */
+#include "tiny.h"
+#include "bitops.h"
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* One byte header, one byte data. */
+#define MIN_BLOCK_SIZE 2
+
+/* Bit 7 (in any byte) == start or end. */
+#define TERM_BIT 0x80
+/* Bit 6 (first and last byte) == one byte long. */
+#define SINGLE_BYTE 0x40
+/* Bit 5 (of first byte) == "is this block free?" */
+#define FREE_BIT 0x20
+
+#define MAX_FREE_CACHED_SIZE 256
+
/* Val is usually offset by MIN_BLOCK_SIZE here. */
static unsigned encode_length(unsigned long val)
{
	unsigned int bits = afls(val);

	/* First byte carries 5 data bits; each extra middle byte carries 7
	 * and the final byte 6, so round the remainder up accordingly. */
	if (bits <= 5)
		return 1;
	return 2 + (bits - 5) / 7;
}
+
+/* Header is included in length, so we might need an extra byte. */
+static unsigned encode_len_with_header(unsigned long len)
+{
+       unsigned int hdrlen = 1;
+
+       assert(len);
+       while (encode_length(len + hdrlen - MIN_BLOCK_SIZE) != hdrlen)
+               hdrlen = encode_length(len + hdrlen - MIN_BLOCK_SIZE);
+
+       return hdrlen;
+}
+
+/* Encoding can be read from front or back; 0 is invalid at either
+ * start or end.  Returns bytes used for header. */
+static unsigned encode(unsigned long len, bool free, unsigned char arr[])
+{
+       unsigned int hdrlen = 1;
+
+       /* We can never have a length < MIN_BLOCK_SIZE. */
+       assert(len >= MIN_BLOCK_SIZE);
+       len -= MIN_BLOCK_SIZE;
+
+       /* First byte always contains free bit. */
+       arr[0] = TERM_BIT | (free ? FREE_BIT : 0);
+       /* Also holds 5 bits of data (0 - 31) */
+       arr[0] |= (len & 0x1F);
+       len >>= 5;
+
+       /* One byte only? */
+       if (!len) {
+               arr[0] |= SINGLE_BYTE;
+               return hdrlen;
+       }
+
+       /* Middle bytes. */
+       while (len >= (1 << 6)) {
+               /* Next 7 data bits */
+               arr[hdrlen++] = (len & 0x7F);
+               len >>= 7;
+       }
+       arr[hdrlen++] = (len | TERM_BIT);
+       return hdrlen;
+}
+
+/* Returns bytes used for header. */
+static unsigned decode(unsigned long *len, bool *free, const unsigned char *arr)
+{
+       unsigned int hdrlen = 0, bits = 5;
+
+       /* Free flag is in bit 5 */
+       *free = (arr[hdrlen] & FREE_BIT);
+       /* Bottom five bits are data. */
+       *len = (arr[hdrlen] & 0x1f);
+       if (!(arr[hdrlen++] & SINGLE_BYTE)) {
+               /* Multi-byte encoding? */
+               while (!(arr[hdrlen] & TERM_BIT)) {
+                       /* 7 more data bits. */
+                       *len |= (arr[hdrlen] & 0x7fUL) << bits;
+                       hdrlen++;
+                       bits += 7;
+               }
+               /* Final byte has 6 bits. */
+               *len |= (arr[hdrlen] & 0x3fUL) << bits;
+               hdrlen++;
+       }
+
+       *len += MIN_BLOCK_SIZE;
+       return hdrlen;
+}
+
/* The free-block cache at the pool's start uses one byte per KB of pool. */
static unsigned long free_array_size(unsigned long poolsize)
{
	return poolsize >> 10;
}
+
/* We have series of 69 free sizes like so:
 * 1, 2, 3, 4.  6, 8, 10, 12, 14, 16. 20, 24, 28, 32... 252.
 * Map a block size to its bucket's byte offset (3 bytes per bucket). */
static unsigned long free_array_off(unsigned long size)
{
	unsigned long bucket;

	if (size <= 4)
		bucket = size - 1;		/* exact buckets 1..4 */
	else if (size <= 16)
		bucket = size / 2 + 1;		/* step-2 buckets */
	else
		bucket = size / 4 + 5;		/* step-4 buckets */

	return bucket * 3;
}
+
+void tiny_alloc_init(void *pool, unsigned long poolsize)
+{
+       /* We start with free array, and then the rest is free. */
+       unsigned long arrsize = free_array_size(poolsize);
+
+       /* Do nothing with 1 byte or less! */
+       if (poolsize < MIN_BLOCK_SIZE)
+               return;
+
+       memset(pool, 0, arrsize);
+       encode(poolsize - arrsize, true, (unsigned char *)pool + arrsize);
+}
+
/* Walk through and try to coalesce: merge each run of adjacent free
 * blocks into one.  Returns true if anything was merged. */
static bool try_coalesce(unsigned char *pool, unsigned long poolsize)
{
	unsigned long len, prev_off = 0, prev_len = 0, off;
	bool free, prev_free = false, coalesced = false;

	/* Blocks start immediately after the free-block cache. */
	off = free_array_size(poolsize);
	do {
		decode(&len, &free, pool + off);
		if (free && prev_free) {
			/* Grow the previous free block to swallow this one;
			 * re-encode its header in place. */
			prev_len += len;
			encode(prev_len, true, pool + prev_off);
			coalesced = true;
		} else {
			prev_free = free;
			prev_off = off;
			prev_len = len;
		}
		off += len;
	} while (off < poolsize);

	/* Clear the free array: cached offsets may now point into the
	 * middle of merged blocks, so the whole cache is stale. */
	if (coalesced)
		memset(pool, 0, free_array_size(poolsize));

	return coalesced;
}
+
+static bool long_enough(unsigned long offset, unsigned long len,
+                       unsigned long size, unsigned long align)
+{
+       unsigned long end = offset + len;
+
+       offset += encode_len_with_header(len);
+       offset = align_up(offset, align);
+       return offset + size <= end;
+}
+
+static void add_to_free_array(unsigned char *arr,
+                             unsigned long poolsize,
+                             unsigned long size,
+                             unsigned long off)
+{
+       unsigned long fa_off;
+
+       if (size >= MAX_FREE_CACHED_SIZE)
+               return;
+
+       for (fa_off = free_array_off(size);
+            fa_off + 3 < free_array_size(poolsize);
+            fa_off += free_array_off(MAX_FREE_CACHED_SIZE)) {
+               if (!arr[fa_off] && !arr[fa_off+1] && !arr[fa_off+2]) {
+                       arr[fa_off] = (off >> 16);
+                       arr[fa_off+1] = (off >> 8);
+                       arr[fa_off+2] = off;
+                       break;
+               }
+       }
+}
+
/* Allocate size bytes, aligned to align, from the pool.  Returns NULL
 * on failure.  NOTE(review): the alignment mask below (~(align - 1))
 * only works for power-of-two align — presumably a caller contract;
 * confirm against alloc.c. */
void *tiny_alloc_get(void *pool, unsigned long poolsize,
		     unsigned long size, unsigned long align)
{
	unsigned long arrsize = free_array_size(poolsize);
	unsigned long len, off, actual, hdr, free_bucket;
	long fa_off;
	unsigned char *arr = pool;
	bool free, coalesced = false;

	/* We can't do anything with tiny pools. */
	if (poolsize < MIN_BLOCK_SIZE)
		return NULL;

	/* We don't do zero-allocs; allows 1 more offset in encoding. */
	if (!size)
		size = 1;

	/* Look for entries in free array, starting from right size up. */
	for (free_bucket = free_array_off(size);
	     free_bucket < free_array_off(MAX_FREE_CACHED_SIZE);
	     free_bucket += 3) {
		/* Each bucket repeats every free_array_off(MAX_FREE_CACHED_SIZE)
		 * bytes throughout the cache; scan all its slots. */
		for (fa_off = free_bucket;
		     fa_off + 3 < free_array_size(poolsize);
		     fa_off += free_array_off(MAX_FREE_CACHED_SIZE)) {
			/* Reassemble the 24-bit big-endian block offset. */
			off = ((unsigned long)arr[fa_off]) << 16
				| ((unsigned long)arr[fa_off+1]) << 8
				| ((unsigned long)arr[fa_off+2]);
			if (!off)
				continue;

			decode(&len, &free, arr + off);
			if (long_enough(off, len, size, align)) {
				/* Remove it. */
				memset(&arr[fa_off], 0, 3);
				goto found;
			}
		}
	}

again:
	/* Cache miss: linear walk of all blocks from the start. */
	off = arrsize;

	decode(&len, &free, arr + off);
	while (!free || !long_enough(off, len, size, align)) {
		/* Refill free array as we go (only valid after coalescing
		 * wiped it, otherwise we'd add duplicates). */
		if (free && coalesced)
			add_to_free_array(arr, poolsize, len, off);

		off += len;
		/* Hit end? */
		if (off == poolsize) {
			/* One coalesce attempt per call, then give up. */
			if (!coalesced && try_coalesce(pool, poolsize)) {
				coalesced = true;
				goto again;
			}
			return NULL;
		}
		decode(&len, &free, arr + off);
	}

found:
	/* We have a free block.  Since we walk from front, take far end. */
	actual = ((off + len - size) & ~(align - 1));
	hdr = actual - encode_len_with_header(off + len - actual);


	/* Do we have enough room to split? */
	if (hdr - off >= MIN_BLOCK_SIZE) {
		/* Leading remainder stays a (cached) free block. */
		encode(hdr - off, true, arr + off);
		add_to_free_array(arr, poolsize, hdr - off, off);
	} else {
		hdr = off;
	}

	/* Make sure that we are all-zero up to actual, so we can walk back
	 * and find header. */
	memset(arr + hdr, 0, actual - hdr);

	/* Create header for allocated block. */
	encode(off + len - hdr, false, arr + hdr);

	return arr + actual;
}
+
+static unsigned char *to_hdr(void *p)
+{
+       unsigned char *hdr = p;
+
+       /* Walk back to find end of header. */
+       while (!*(--hdr));
+       assert(*hdr & TERM_BIT);
+
+       /* Now walk back to find start of header. */
+       if (!(*hdr & SINGLE_BYTE)) {
+               while (!(*(--hdr) & TERM_BIT));
+       }
+       return hdr;
+}
+
+void tiny_alloc_free(void *pool, unsigned long poolsize, void *freep)
+{
+       unsigned long len;
+       unsigned char *arr = pool;
+       unsigned char *hdr;
+       bool free;
+
+       /* Too small to do anything. */
+       if (poolsize < MIN_BLOCK_SIZE)
+               return;
+
+       hdr = to_hdr(freep);
+
+       decode(&len, &free, hdr);
+       assert(!free);
+       hdr[0] |= FREE_BIT;
+
+       /* If an empty slot, put this in free array. */
+       add_to_free_array(pool, poolsize, len, hdr - arr);
+}
+
+unsigned long tiny_alloc_size(void *pool, unsigned long poolsize, void *p)
+{
+       unsigned char *hdr = to_hdr(p);
+       unsigned long len, hdrlen;
+       bool free;
+
+       hdrlen = decode(&len, &free, hdr);
+       return len - hdrlen;
+}
+
+/* Useful for gdb breakpoints. */
+static bool tiny_check_fail(void)
+{
+       return false;
+}
+
+static bool check_decode(const unsigned char *arr, unsigned long len)
+{
+       unsigned int i;
+
+       if (len == 0)
+               return tiny_check_fail();
+       if (!(arr[0] & TERM_BIT))
+               return tiny_check_fail();
+       if (arr[0] & SINGLE_BYTE)
+               return true;
+       for (i = 1; i < len; i++) {
+               if (arr[i] & TERM_BIT)
+                       return true;
+       }
+       return tiny_check_fail();
+}
+
/* Full consistency check of the pool: every block header decodes, block
 * lengths tile the pool exactly, every cached free offset points at a
 * free block, and every cached entry sits in the bucket its size maps
 * to.  Returns true if consistent. */
bool tiny_alloc_check(void *pool, unsigned long poolsize)
{
	unsigned long arrsize = free_array_size(poolsize);
	unsigned char *arr = pool;
	unsigned long len, off, hdrlen;
	/* NOTE(review): VLA of up to poolsize/1024/3 entries lives on the
	 * stack — presumably fine for the pool sizes used here; confirm. */
	unsigned long i, freearr[arrsize], num_freearr = 0;
	bool free;

	if (poolsize < MIN_BLOCK_SIZE)
		return true;

	/* Collect every cached free-block offset; each must be in range. */
	for (i = 0; i + 3 < free_array_size(poolsize); i += 3) {
		off = ((unsigned long)arr[i]) << 16
			| ((unsigned long)arr[i+1]) << 8
			| ((unsigned long)arr[i+2]);
		if (!off)
			continue;

		if (off >= poolsize)
			return tiny_check_fail();
		freearr[num_freearr++] = off;
	}

	/* Walk every block; tick off cached offsets as we meet them. */
	for (off = arrsize; off < poolsize; off += len) {
		/* We should have a valid header. */
		if (!check_decode(arr + off, poolsize - off))
			return false;
		hdrlen = decode(&len, &free, arr + off);
		if (off + len > poolsize)
			return tiny_check_fail();
		/* Header must be minimally encoded for this length. */
		if (hdrlen != encode_length(len - MIN_BLOCK_SIZE))
			return tiny_check_fail();
		for (i = 0; i < num_freearr; i++) {
			if (freearr[i] == off) {
				/* Cached offset must refer to a free block. */
				if (!free)
					return tiny_check_fail();
				/* Remove matched entry from pending list. */
				memmove(&freearr[i], &freearr[i+1],
					(num_freearr-- - (i + 1))
					* sizeof(freearr[i]));
				break;
			}
		}
	}

	/* Now we should have found everything in freearr. */
	if (num_freearr)
		return tiny_check_fail();

	/* Now check that sizes are correct. */
	for (i = 0; i + 3 < free_array_size(poolsize); i += 3) {
		unsigned long fa_off;

		off = ((unsigned long)arr[i]) << 16
			| ((unsigned long)arr[i+1]) << 8
			| ((unsigned long)arr[i+2]);
		if (!off)
			continue;

		decode(&len, &free, arr + off);

		/* Would we expect to find this length in this bucket? */
		if (len >= MAX_FREE_CACHED_SIZE)
			return tiny_check_fail();

		/* Slot i must be one of the repeats of len's bucket. */
		for (fa_off = free_array_off(len);
		     fa_off + 3 < free_array_size(poolsize);
		     fa_off += free_array_off(MAX_FREE_CACHED_SIZE)) {
			if (fa_off == i)
				break;
		}
		if (fa_off != i)
			return tiny_check_fail();
	}

	return true;
}
+
/* FIXME: Implement. */
/* Stub: intentionally does nothing; `out` and the pool are unused. */
void tiny_alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
{
}
diff --git a/ccan/antithread/alloc/tiny.h b/ccan/antithread/alloc/tiny.h
new file mode 100644 (file)
index 0000000..5ed4ee1
--- /dev/null
@@ -0,0 +1,15 @@
/* Licensed under LGPLv2.1+ - see LICENSE file for details */
#ifndef CCAN_TINY_H
#define CCAN_TINY_H
#include <stdbool.h>
#include <stdio.h>

/* Set up pool as an empty tiny-allocator arena of poolsize bytes. */
void tiny_alloc_init(void *pool, unsigned long poolsize);

/* Allocate size bytes aligned to align from pool; NULL on failure. */
void *tiny_alloc_get(void *pool, unsigned long poolsize,
		     unsigned long size, unsigned long align);

/* Release an allocation previously returned by tiny_alloc_get().
 * (Parameter renamed from `free`, which shadowed the standard free()
 * and differed from the definition's `freep` in tiny.c.) */
void tiny_alloc_free(void *pool, unsigned long poolsize, void *freep);

/* Usable size of allocation p (may exceed the size requested). */
unsigned long tiny_alloc_size(void *pool, unsigned long poolsize, void *p);

/* Check pool consistency; returns true if intact. */
bool tiny_alloc_check(void *pool, unsigned long poolsize);

/* Dump a human-readable view of pool to out (currently a stub). */
void tiny_alloc_visualize(FILE *out, void *pool, unsigned long poolsize);

#endif /* CCAN_TINY_H */
index 080f890cd859fc6390c19e4e1daa18025283fb30..72e172b1a244d82bd6e503510943eaf155091691 100644 (file)
@@ -15,7 +15,7 @@
 #include <ccan/noerr/noerr.h>
 #include <ccan/talloc/talloc.h>
 #include <ccan/read_write_all/read_write_all.h>
-#include <ccan/alloc/alloc.h>
+#include <ccan/antithread/alloc/alloc.h>
 #include <ccan/list/list.h>
 
 /* FIXME: Valgrind support should be possible for some cases.  Tricky