X-Git-Url: https://git.ozlabs.org/?a=blobdiff_plain;f=ccan%2Falloc%2Falloc.c;h=0aac015595019aa1b201a0ab737655e19ef04cd6;hb=c8c0c25c7c34654345458b7ca7fdee46f82d09ca;hp=693943aea140c881587020550525c1aa78571768;hpb=fbd94d9909892758d594d410bb5f981b3567fb3e;p=ccan

diff --git a/ccan/alloc/alloc.c b/ccan/alloc/alloc.c
index 693943ae..0aac0155 100644
--- a/ccan/alloc/alloc.c
+++ b/ccan/alloc/alloc.c
@@ -11,6 +11,7 @@
 #include <ccan/likely/likely.h>
 #include <ccan/alignof/alignof.h>
 #include <ccan/short_types/short_types.h>
+#include <ccan/compiler/compiler.h>
 #include "config.h"
 
 /*
@@ -35,7 +36,7 @@
  */
 
 /* We divide the pool into this many large pages (nearest power of 2) */
-#define MAX_LARGE_PAGES (1024UL)
+#define MAX_LARGE_PAGES (256UL)
 
 /* 32 small pages == 1 large page. */
 #define BITS_FROM_SMALL_TO_LARGE_PAGE 5
@@ -133,7 +134,7 @@ static unsigned int size_to_bucket(unsigned long size)
 
 static unsigned int small_page_bits(unsigned long poolsize)
 {
-	return fls(poolsize / MAX_SMALL_PAGES / 2);
+	return fls(poolsize / MAX_SMALL_PAGES - 1);
 }
 
 static struct page_header *from_pgnum(struct header *head,
@@ -404,6 +405,7 @@ void alloc_init(void *pool, unsigned long poolsize)
 	/* Add the rest of the pages as large pages. */
 	i = SMALL_PAGES_PER_LARGE_PAGE;
 	while ((i << sp_bits) + (1 << lp_bits) <= poolsize) {
+		assert(i < MAX_SMALL_PAGES);
 		ph = from_pgnum(head, i, sp_bits);
 		ph->elements_used = 0;
 		add_large_page_to_freelist(head, ph, sp_bits);
@@ -489,6 +491,8 @@ static unsigned long break_up_large_page(struct header *head,
 
 	for (i = 1; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
 		struct page_header *ph = from_pgnum(head, lpage + i, sp_bits);
+		/* Initialize this: huge_alloc reads it. */
+		ph->elements_used = 0;
 		add_small_page_to_freelist(head, ph, sp_bits);
 	}
 
@@ -509,49 +513,6 @@ static u16 get_small_page(struct header *head, unsigned long poolsize,
 	return ret;
 }
 
-void where_is_page(struct header *head, struct page_header *where,
-		   unsigned int sp_bits)
-{
-	struct page_header *pg;
-	unsigned long off, bucket,
-		num_buckets = max_bucket(sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE);
-
-	for (off = head->small_free_list; off; off = pg->next) {
-		pg = from_pgnum(head, off, sp_bits);
-		if (pg == where) {
-			printf("It's in the small free list\n");
-			return;
-		}
-	}
-
-	for (off = head->large_free_list; off; off = pg->next) {
-		pg = from_pgnum(head, off, sp_bits);
-		if (pg == where) {
-			printf("It's in the large free list\n");
-			return;
-		}
-	}
-
-	for (bucket = 0; bucket < num_buckets; bucket++) {
-		for (off = head->bs[bucket].page_list; off; off = pg->next) {
-			pg = from_pgnum(head, off, sp_bits);
-			if (pg == where) {
-				printf("It's in %lu bucket page list\n", bucket);
-				return;
-			}
-		}
-
-		for (off = head->bs[bucket].full_list; off; off = pg->next) {
-			pg = from_pgnum(head, off, sp_bits);
-			if (pg == where) {
-				printf("It's in %lu bucket full list\n", bucket);
-				return;
-			}
-		}
-	}
-	printf("It's nowhere!\n");
-}
-
 static bool huge_allocated(struct header *head, unsigned long offset)
 {
 	unsigned long i;
@@ -566,8 +527,9 @@ static bool huge_allocated(struct header *head, unsigned long offset)
 }
 
 /* They want something really big. Aim for contiguous pages (slow). */
-static void *unlikely_func huge_alloc(void *pool, unsigned long poolsize,
-				      unsigned long size, unsigned long align)
+static COLD_ATTRIBUTE
+void *huge_alloc(void *pool, unsigned long poolsize,
+		 unsigned long size, unsigned long align)
 {
 	struct header *head = pool;
 	struct huge_alloc *ha;
@@ -685,8 +647,8 @@ done:
 	return (char *)pool + ha->off;
 }
 
-static void unlikely_func huge_free(struct header *head,
-				    unsigned long poolsize, void *free)
+static COLD_ATTRIBUTE void
+huge_free(struct header *head, unsigned long poolsize, void *free)
 {
 	unsigned long i, off, pgnum, free_off = (char *)free - (char *)head;
 	unsigned int sp_bits, lp_bits;
@@ -721,7 +683,8 @@ static void unlikely_func huge_free(struct header *head,
 	alloc_free(head, poolsize, ha);
 }
 
-static unsigned long unlikely_func huge_size(struct header *head, void *p)
+static COLD_ATTRIBUTE unsigned long
+huge_size(struct header *head, void *p)
 {
 	unsigned long i, off = (char *)p - (char *)head;
 	struct huge_alloc *ha;
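
A note on the small_page_bits() change: fls() returns the 1-based index of
the highest set bit, so the old expression fls(poolsize / MAX_SMALL_PAGES / 2)
works out to floor(log2(poolsize / MAX_SMALL_PAGES)). That picks the right
small-page size only when poolsize / MAX_SMALL_PAGES is a power of two; for
any other pool size the page size comes out one bit too small, the pool then
holds more than MAX_SMALL_PAGES small pages, and the assert(i <
MAX_SMALL_PAGES) added to alloc_init() above would trip. The new expression
fls(poolsize / MAX_SMALL_PAGES - 1) rounds up to ceil(log2(...)) instead.
Below is a minimal standalone sketch of that arithmetic, not part of the
patch: fls() is reimplemented here as a stand-in for ccan's bit op, and
MAX_SMALL_PAGES is assumed to be MAX_LARGE_PAGES <<
BITS_FROM_SMALL_TO_LARGE_PAGE, matching the constants the patch touches.

	#include <assert.h>
	#include <stdio.h>

	#define MAX_LARGE_PAGES (256UL)
	#define BITS_FROM_SMALL_TO_LARGE_PAGE 5
	/* Assumption: alloc.c derives its small-page limit this way. */
	#define MAX_SMALL_PAGES (MAX_LARGE_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)

	/* Stand-in for ccan's fls(): 1-based index of the highest set bit. */
	static unsigned int fls(unsigned long val)
	{
		unsigned int r = 0;
		while (val) {
			val >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		/* A 40MB pool: poolsize / MAX_SMALL_PAGES == 5120, not a power of 2. */
		unsigned long poolsize = 40UL << 20;

		unsigned int old_bits = fls(poolsize / MAX_SMALL_PAGES / 2);
		unsigned int new_bits = fls(poolsize / MAX_SMALL_PAGES - 1);

		/* Old formula: 12-bit pages -> 10240 small pages, over the 8192 limit. */
		printf("old: %u bits -> %lu small pages\n",
		       old_bits, poolsize >> old_bits);
		/* New formula: 13-bit pages -> 5120 small pages, within the limit. */
		printf("new: %u bits -> %lu small pages\n",
		       new_bits, poolsize >> new_bits);

		assert(poolsize >> new_bits <= MAX_SMALL_PAGES);
		return 0;
	}

Both formulas agree when the quotient is an exact power of two, which is why
the overflow only shows up for odd-sized pools.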