diff --git a/ccan/alloc/alloc.c b/ccan/alloc/alloc.c
index 3f1cc750..779d5e72 100644
--- a/ccan/alloc/alloc.c
+++ b/ccan/alloc/alloc.c
@@ -5,6 +5,8 @@
 #include <assert.h>
 #include <stdlib.h>
 #include "alloc.h"
+#include "bitops.h"
+#include "tiny.h"
 #include <ccan/build_assert/build_assert.h>
 #include <ccan/likely/likely.h>
 #include <ccan/alignof/alignof.h>
@@ -31,24 +33,26 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 
-#if 0 /* Until we have the tiny allocator working, go down to 1 MB */
+#if 0 /* Until we have the tiny allocator working, go down to 64k */
 
 /* We divide the pool into this many large pages (nearest power of 2) */
-#define MAX_PAGES (1024UL)
+#define MAX_LARGE_PAGES (1024UL)
 
 /* 32 small pages == 1 large page. */
 #define BITS_FROM_SMALL_TO_LARGE_PAGE 5
 
 #else
 
-#define MAX_PAGES (128UL)
+#define MAX_LARGE_PAGES (128UL)
 #define BITS_FROM_SMALL_TO_LARGE_PAGE 4
 
 #endif
 
-/* Smallest pool size for this scheme: 512-byte small pages.  That's
- * 3/5% overhead for 32/64 bit. */
-#define MIN_USEFUL_SIZE (MAX_PAGES << (9 + BITS_FROM_SMALL_TO_LARGE_PAGE))
+#define MAX_SMALL_PAGES (MAX_LARGE_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)
+
+/* Smallest pool size for this scheme: 128-byte small pages.  That's
+ * 9/13% overhead for 32/64 bit. */
+#define MIN_USEFUL_SIZE (MAX_SMALL_PAGES * 128)
 
 /* Every 4 buckets, we jump up a power of 2. ...8 10 12 14 16 20 24 28 32... */
 #define INTER_BUCKET_SPACE 4
@@ -64,14 +68,14 @@
 #define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
 
 struct bucket_state {
-	unsigned long elements_per_page;
+	u32 elements_per_page;
 	u16 page_list;
 	u16 full_list;
 };
 
 struct header {
 	/* Bitmap of which pages are large. */
-	unsigned long pagesize[MAX_PAGES / BITS_PER_LONG];
+	unsigned long pagesize[MAX_LARGE_PAGES / BITS_PER_LONG];
 
 	/* List of unused small/large pages. */
 	u16 small_free_list;
@@ -83,152 +87,12 @@ struct header {
 
 struct page_header {
 	u16 next, prev;
-	u32 elements_used;
-	/* FIXME: Pack this in somewhere... */
-	u8 bucket;
+	/* FIXME: We can just count all-0 and all-1 used[] elements. */
+	unsigned elements_used : 25;
+	unsigned bucket : 7;
 	unsigned long used[1]; /* One bit per element. */
 };
 
-/* 2 bit for every byte to allocate. */
-static void tiny_alloc_init(void *pool, unsigned long poolsize)
-{
-/* FIXME */
-}
-
-static void *tiny_alloc_get(void *pool, unsigned long poolsize,
-			    unsigned long size, unsigned long align)
-{
-/* FIXME */
-	return NULL;
-}
-
-static void tiny_alloc_free(void *pool, unsigned long poolsize, void *free)
-{
-/* FIXME */
-}
-
-static unsigned long tiny_alloc_size(void *pool, unsigned long poolsize,
-				     void *p)
-{
-/* FIXME */
-	return 0;
-}
-
-static bool tiny_alloc_check(void *pool, unsigned long poolsize)
-{
-/* FIXME */
-	return true;
-}
-
-static unsigned int fls(unsigned long val)
-{
-#if HAVE_BUILTIN_CLZL
-	/* This is significantly faster! */
-	return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
-#else
-	unsigned int r = 32;
-
-	if (!val)
-		return 0;
-	if (!(val & 0xffff0000u)) {
-		val <<= 16;
-		r -= 16;
-	}
-	if (!(val & 0xff000000u)) {
-		val <<= 8;
-		r -= 8;
-	}
-	if (!(val & 0xf0000000u)) {
-		val <<= 4;
-		r -= 4;
-	}
-	if (!(val & 0xc0000000u)) {
-		val <<= 2;
-		r -= 2;
-	}
-	if (!(val & 0x80000000u)) {
-		val <<= 1;
-		r -= 1;
-	}
-	return r;
-#endif
-}
-
-/* FIXME: Move to bitops. */
-static unsigned int ffsl(unsigned long val)
-{
-#if HAVE_BUILTIN_FFSL
-	/* This is significantly faster! */
-	return __builtin_ffsl(val);
-#else
-	unsigned int r = 1;
-
-	if (!val)
-		return 0;
-	if (sizeof(long) == sizeof(u64)) {
-		if (!(val & 0xffffffff)) {
-			/* Workaround gcc warning on 32-bit:
			   error: right shift count >= width of type */
-			u64 tmp = val;
-			tmp >>= 32;
-			val = tmp;
-			r += 32;
-		}
-	}
-	if (!(val & 0xffff)) {
-		val >>= 16;
-		r += 16;
-	}
-	if (!(val & 0xff)) {
-		val >>= 8;
-		r += 8;
-	}
-	if (!(val & 0xf)) {
-		val >>= 4;
-		r += 4;
-	}
-	if (!(val & 3)) {
-		val >>= 2;
-		r += 2;
-	}
-	if (!(val & 1)) {
-		val >>= 1;
-		r += 1;
-	}
-	return r;
-#endif
-}
-
-static unsigned int popcount(unsigned long val)
-{
-#if HAVE_BUILTIN_POPCOUNTL
-	return __builtin_popcountl(val);
-#else
-	if (sizeof(long) == sizeof(u64)) {
-		u64 v = val;
-		v = (v & 0x5555555555555555ULL)
-			+ ((v >> 1) & 0x5555555555555555ULL);
-		v = (v & 0x3333333333333333ULL)
-			+ ((v >> 1) & 0x3333333333333333ULL);
-		v = (v & 0x0F0F0F0F0F0F0F0FULL)
-			+ ((v >> 1) & 0x0F0F0F0F0F0F0F0FULL);
-		v = (v & 0x00FF00FF00FF00FFULL)
-			+ ((v >> 1) & 0x00FF00FF00FF00FFULL);
-		v = (v & 0x0000FFFF0000FFFFULL)
-			+ ((v >> 1) & 0x0000FFFF0000FFFFULL);
-		v = (v & 0x00000000FFFFFFFFULL)
-			+ ((v >> 1) & 0x00000000FFFFFFFFULL);
-		return v;
-	}
-	val = (val & 0x55555555ULL) + ((val >> 1) & 0x55555555ULL);
-	val = (val & 0x33333333ULL) + ((val >> 1) & 0x33333333ULL);
-	val = (val & 0x0F0F0F0FULL) + ((val >> 1) & 0x0F0F0F0FULL);
-	val = (val & 0x00FF00FFULL) + ((val >> 1) & 0x00FF00FFULL);
-	val = (val & 0x0000FFFFULL) + ((val >> 1) & 0x0000FFFFULL);
-	return val;
-#endif
-}
-
 /*
  * Every 4 buckets, the size doubles.
  * Between buckets, sizes increase linearly.
  */
@@ -267,14 +131,9 @@ static unsigned int size_to_bucket(unsigned long size)
 		+ ((overshoot * INTER_BUCKET_SPACE + (1 << base)-1) >> base);
 }
 
-static unsigned int large_page_bits(unsigned long poolsize)
+static unsigned int small_page_bits(unsigned long poolsize)
 {
-	return fls(poolsize / MAX_PAGES / 2);
-}
-
-static unsigned long align_up(unsigned long x, unsigned long align)
-{
-	return (x + align - 1) & ~(align - 1);
+	return fls(poolsize / MAX_SMALL_PAGES / 2);
 }
 
 static struct page_header *from_pgnum(struct header *head,
@@ -470,10 +329,10 @@ void alloc_init(void *pool, unsigned long poolsize)
 	}
 
 	/* We rely on page numbers fitting in 16 bit. */
-	BUILD_ASSERT((MAX_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE) < 65536);
+	BUILD_ASSERT(MAX_SMALL_PAGES < 65536);
 
-	lp_bits = large_page_bits(poolsize);
-	sp_bits = lp_bits - BITS_FROM_SMALL_TO_LARGE_PAGE;
+	sp_bits = small_page_bits(poolsize);
+	lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
 
 	num_buckets = max_bucket(lp_bits);
 
@@ -500,7 +359,7 @@ void alloc_init(void *pool, unsigned long poolsize)
 	/* FIXME: small pages for last bit? */
 
 	/* Split first page into small pages. */
-	assert(header_size << (1UL << lp_bits));
+	assert(header_size < (1UL << lp_bits));
 	clear_bit(head->pagesize, 0);
 
 	/* Skip over page(s) used by header, add rest to free list */
@@ -630,13 +489,14 @@ void *alloc_get(void *pool, unsigned long poolsize,
 		size = 1;
 	bucket = size_to_bucket(size);
 
-	if (bucket >= max_bucket(large_page_bits(poolsize))) {
+	sp_bits = small_page_bits(poolsize);
+
+	if (bucket >= max_bucket(sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE)) {
 		/* FIXME: huge alloc. */
 		return NULL;
 	}
 
 	bs = &head->bs[bucket];
-	sp_bits = large_page_bits(poolsize) - BITS_FROM_SMALL_TO_LARGE_PAGE;
 
 	if (!bs->page_list) {
 		struct page_header *ph;
@@ -688,7 +548,7 @@ void alloc_free(void *pool, unsigned long poolsize, void *free)
 	}
 
 	/* Get page header. */
-	sp_bits = large_page_bits(poolsize) - BITS_FROM_SMALL_TO_LARGE_PAGE;
+	sp_bits = small_page_bits(poolsize);
 	pgnum = offset >> sp_bits;
 
 	/* Big page? Round down further. */
@@ -736,7 +596,7 @@ unsigned long alloc_size(void *pool, unsigned long poolsize, void *p)
 		return tiny_alloc_size(pool, poolsize, p);
 
 	/* Get page header. */
-	sp_bits = large_page_bits(poolsize) - BITS_FROM_SMALL_TO_LARGE_PAGE;
+	sp_bits = small_page_bits(poolsize);
 	pgnum = offset >> sp_bits;
 
 	/* Big page? Round down further. */
@@ -795,8 +655,8 @@ static bool check_bucket(struct header *head,
 	struct page_header *ph;
 	unsigned long taken, i, prev, pagesize, sp_bits, lp_bits;
 
-	lp_bits = large_page_bits(poolsize);
-	sp_bits = lp_bits - BITS_FROM_SMALL_TO_LARGE_PAGE;
+	sp_bits = small_page_bits(poolsize);
+	lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
 
 	lp_bucket = large_page_bucket(bindex, sp_bits);
 
@@ -892,14 +752,13 @@ bool alloc_check(void *pool, unsigned long poolsize)
 	struct header *head = pool;
 	unsigned long prev, i, lp_bits, sp_bits, header_size, num_buckets;
 	struct page_header *ph;
-	unsigned long pages[(MAX_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)
-			    / BITS_PER_LONG] = { 0 };
+	unsigned long pages[MAX_SMALL_PAGES / BITS_PER_LONG] = { 0 };
 
 	if (poolsize < MIN_USEFUL_SIZE)
 		return tiny_alloc_check(pool, poolsize);
 
-	lp_bits = large_page_bits(poolsize);
-	sp_bits = lp_bits - BITS_FROM_SMALL_TO_LARGE_PAGE;
+	sp_bits = small_page_bits(poolsize);
+	lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
 
 	num_buckets = max_bucket(lp_bits);
 
@@ -1049,10 +908,6 @@ static unsigned long visualize_bucket(FILE *out, struct header *head,
 	return overhead;
 }
 
-static void tiny_alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
-{
-}
-
 void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
 {
 	struct header *head = pool;
@@ -1067,8 +922,8 @@ void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
 		return;
 	}
 
-	lp_bits = large_page_bits(poolsize);
-	sp_bits = lp_bits - BITS_FROM_SMALL_TO_LARGE_PAGE;
+	sp_bits = small_page_bits(poolsize);
+	lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
 
 	num_buckets = max_bucket(lp_bits);
 	header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);
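
For reference, the bucket scheme the patch keeps: every INTER_BUCKET_SPACE (4)
buckets the element size doubles, and the sizes in between grow linearly,
giving ...8 10 12 14 16 20 24 28 32... as the comment in the file says. Below
is a minimal standalone sketch of that mapping, not part of the patch; fls_()
is a portable stand-in for the fls() the file now gets from bitops.h:

#include <assert.h>
#include <stdio.h>

#define INTER_BUCKET_SPACE 4

/* Position of the highest set bit, 1-based; 0 for val == 0. */
static unsigned int fls_(unsigned long val)
{
	unsigned int r = 0;

	while (val) {
		val >>= 1;
		r++;
	}
	return r;
}

/* Size doubles every INTER_BUCKET_SPACE buckets, linear in between. */
static unsigned long bucket_to_size(unsigned int bucket)
{
	unsigned long base = 1UL << (bucket / INTER_BUCKET_SPACE);

	return base + (bucket % INTER_BUCKET_SPACE) * (base / INTER_BUCKET_SPACE);
}

/* Round size up to the nearest bucket (same formula as size_to_bucket()
 * in the diff above). */
static unsigned int size_to_bucket(unsigned long size)
{
	unsigned int base = fls_(size / 2);
	unsigned long overshoot = size - (1UL << base);

	return base * INTER_BUCKET_SPACE
		+ ((overshoot * INTER_BUCKET_SPACE + (1UL << base) - 1) >> base);
}

int main(void)
{
	/* Prints sizes 8 10 12 14 16 20 24 28 32 for buckets 12..20,
	 * and checks the mapping round-trips. */
	for (unsigned int b = 12; b <= 20; b++) {
		unsigned long size = bucket_to_size(b);

		assert(size_to_bucket(size) == b);
		printf("bucket %u -> size %lu\n", b, size);
	}
	return 0;
}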
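
The new page geometry can be sanity-checked the same way. With the #else
values (MAX_LARGE_PAGES = 128, BITS_FROM_SMALL_TO_LARGE_PAGE = 4),
MAX_SMALL_PAGES is 2048 and MIN_USEFUL_SIZE is 2048 * 128 = 256k.
small_page_bits() picks the largest power-of-two page size not exceeding
poolsize / MAX_SMALL_PAGES, so at MIN_USEFUL_SIZE it yields the 128-byte
small pages the new comment mentions. A hypothetical driver, not part of
ccan (fls_() as above):

#include <assert.h>
#include <stdio.h>

#define MAX_LARGE_PAGES (128UL)
#define BITS_FROM_SMALL_TO_LARGE_PAGE 4
#define MAX_SMALL_PAGES (MAX_LARGE_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)
#define MIN_USEFUL_SIZE (MAX_SMALL_PAGES * 128)

static unsigned int fls_(unsigned long val)
{
	unsigned int r = 0;

	while (val) {
		val >>= 1;
		r++;
	}
	return r;
}

/* Same formula as small_page_bits() in the patch. */
static unsigned int small_page_bits(unsigned long poolsize)
{
	return fls_(poolsize / MAX_SMALL_PAGES / 2);
}

int main(void)
{
	unsigned long poolsize = MIN_USEFUL_SIZE;	/* 2048 pages * 128 bytes */
	unsigned int sp_bits = small_page_bits(poolsize);
	unsigned int lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;

	/* Page numbers must fit the u16 list links (the patch's BUILD_ASSERT). */
	assert(MAX_SMALL_PAGES < 65536);

	/* Prints: small page = 128 bytes, large page = 2048 bytes. */
	printf("small page = %lu bytes, large page = %lu bytes\n",
	       1UL << sp_bits, 1UL << lp_bits);
	return 0;
}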