X-Git-Url: http://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Falloc%2Falloc.c;h=e9aaf3ddea8c7c7727f6a65c5ccd4a516209307c;hp=bc3760abc0797d43e10ab1dbed920d28bd501c24;hb=fbc7877063275421c4313ea30a65378d48dd4f07;hpb=7f9d956574d30f70d2260f4b7694f481e3765173

diff --git a/ccan/alloc/alloc.c b/ccan/alloc/alloc.c
index bc3760ab..e9aaf3dd 100644
--- a/ccan/alloc/alloc.c
+++ b/ccan/alloc/alloc.c
@@ -9,7 +9,9 @@
 #include "tiny.h"
 #include <ccan/build_assert/build_assert.h>
 #include <ccan/likely/likely.h>
+#include <ccan/alignof/alignof.h>
 #include <ccan/short_types/short_types.h>
+#include <stdlib.h>
 #include "config.h"
 
 /*
@@ -33,21 +35,12 @@
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 
-#if 0 /* Until we have the tiny allocator working, go down to 64k */
-
 /* We divide the pool into this many large pages (nearest power of 2) */
-#define MAX_LARGE_PAGES (1024UL)
+#define MAX_LARGE_PAGES (256UL)
 
 /* 32 small pages == 1 large page. */
 #define BITS_FROM_SMALL_TO_LARGE_PAGE 5
 
-#else
-
-#define MAX_LARGE_PAGES (128UL)
-#define BITS_FROM_SMALL_TO_LARGE_PAGE 4
-
-#endif
-
 #define MAX_SMALL_PAGES (MAX_LARGE_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)
 
 /* Smallest pool size for this scheme: 128-byte small pages.  That's
@@ -81,10 +74,18 @@ struct header {
 	u16 small_free_list;
 	u16 large_free_list;
 
+	/* List of huge allocs. */
+	unsigned long huge;
+
 	/* This is less defined: we have two buckets for each power of 2 */
 	struct bucket_state bs[1];
 };
 
+struct huge_alloc {
+	unsigned long next, prev;
+	unsigned long off, len;
+};
+
 struct page_header {
 	u16 next, prev;
 	/* FIXME: We can just count all-0 and all-1 used[] elements. */
@@ -107,7 +108,7 @@
  */
 static unsigned long bucket_to_size(unsigned int bucket)
 {
-	unsigned long base = 1 << (bucket / INTER_BUCKET_SPACE);
+	unsigned long base = 1UL << (bucket / INTER_BUCKET_SPACE);
 	return base + ((bucket % INTER_BUCKET_SPACE)
 		       << (bucket / INTER_BUCKET_SPACE))
 		/ INTER_BUCKET_SPACE;
@@ -126,14 +127,14 @@ static unsigned int size_to_bucket(unsigned long size)
 	unsigned int base = fls(size/2);
 	unsigned long overshoot;
 
-	overshoot = size - (1 << base);
+	overshoot = size - (1UL << base);
 	return base * INTER_BUCKET_SPACE
-		+ ((overshoot * INTER_BUCKET_SPACE + (1 << base)-1) >> base);
+		+ ((overshoot * INTER_BUCKET_SPACE + (1UL << base)-1) >> base);
 }
 
 static unsigned int small_page_bits(unsigned long poolsize)
 {
-	return fls(poolsize / MAX_SMALL_PAGES / 2);
+	return fls(poolsize / MAX_SMALL_PAGES - 1);
 }
 
 static struct page_header *from_pgnum(struct header *head,
@@ -166,7 +167,7 @@ static unsigned long page_header_size(unsigned int align_bits,
 	size = sizeof(struct page_header)
 		- sizeof(((struct page_header *)0)->used)
 		+ used_size(num_elements);
-	return align_up(size, 1 << align_bits);
+	return align_up(size, 1UL << align_bits);
 }
 
 static void add_to_list(struct header *head,
@@ -215,6 +216,36 @@ static u16 pop_from_list(struct header *head,
 	return h;
 }
 
+static void add_to_huge_list(struct header *head, struct huge_alloc *ha)
+{
+	unsigned long h = head->huge;
+	unsigned long offset = (char *)ha - (char *)head;
+
+	ha->next = h;
+	if (h) {
+		struct huge_alloc *prev = (void *)((char *)head + h);
+		assert(prev->prev == 0);
+		prev->prev = offset;
+	}
+	head->huge = offset;
+	ha->prev = 0;
+}
+
+static void del_from_huge(struct header *head, struct huge_alloc *ha)
+{
+	/* Front of list? */
+	if (ha->prev == 0) {
+		head->huge = ha->next;
+	} else {
+		struct huge_alloc *prev = (void *)((char *)head + ha->prev);
+		prev->next = ha->next;
+	}
+	if (ha->next != 0) {
+		struct huge_alloc *next = (void *)((char *)head + ha->next);
+		next->prev = ha->prev;
+	}
+}
+
 static void add_small_page_to_freelist(struct header *head,
 				       struct page_header *ph,
 				       unsigned int sp_bits)
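The huge-allocation bookkeeping above is a doubly-linked list threaded through
the pool as byte offsets from the header rather than as raw pointers, so the
metadata stays valid no matter where the pool happens to be mapped.  Offset 0
can double as the list terminator because the pool header itself occupies
offset 0, so no huge_alloc tracking structure can ever live there.  A minimal
standalone model of the same walk (the names pool_base, first_huge and
overlaps are illustrative, not from alloc.c):

	#include <stddef.h>

	struct huge_alloc {
		unsigned long next, prev; /* byte offsets from pool start; 0 ends list */
		unsigned long off, len;   /* extent of the allocation in the pool */
	};

	/* The same traversal huge_allocated() performs: follow byte
	 * offsets from the pool base, never stored pointers. */
	static int overlaps(char *pool_base, unsigned long first_huge,
			    unsigned long offset)
	{
		unsigned long i;
		struct huge_alloc *ha;

		for (i = first_huge; i; i = ha->next) {
			ha = (struct huge_alloc *)(pool_base + i);
			if (ha->off <= offset && ha->off + ha->len > offset)
				return 1;
		}
		return 0;
	}
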
@@ -263,17 +294,17 @@ static void add_to_bucket_full_list(struct header *head,
 
 static void clear_bit(unsigned long bitmap[], unsigned int off)
 {
-	bitmap[off / BITS_PER_LONG] &= ~(1 << (off % BITS_PER_LONG));
+	bitmap[off / BITS_PER_LONG] &= ~(1UL << (off % BITS_PER_LONG));
 }
 
 static bool test_bit(const unsigned long bitmap[], unsigned int off)
 {
-	return bitmap[off / BITS_PER_LONG] & (1 << (off % BITS_PER_LONG));
+	return bitmap[off / BITS_PER_LONG] & (1UL << (off % BITS_PER_LONG));
 }
 
 static void set_bit(unsigned long bitmap[], unsigned int off)
 {
-	bitmap[off / BITS_PER_LONG] |= (1 << (off % BITS_PER_LONG));
+	bitmap[off / BITS_PER_LONG] |= (1UL << (off % BITS_PER_LONG));
 }
 
 /* There must be a bit to be found. */
@@ -293,7 +324,7 @@ static unsigned long elements_per_page(unsigned long align_bits,
 	unsigned long num, overhead;
 
 	/* First approximation: no extra room for bitmap. */
-	overhead = align_up(sizeof(struct page_header), 1 << align_bits);
+	overhead = align_up(sizeof(struct page_header), 1UL << align_bits);
 	num = (psize - overhead) / esize;
 
 	while (page_header_size(align_bits, num) + esize * num > psize)
@@ -359,11 +390,11 @@ void alloc_init(void *pool, unsigned long poolsize)
 	/* FIXME: small pages for last bit? */
 
 	/* Split first page into small pages. */
-	assert(header_size << (1UL << lp_bits));
+	assert(header_size < (1UL << lp_bits));
 	clear_bit(head->pagesize, 0);
 
 	/* Skip over page(s) used by header, add rest to free list */
-	for (i = align_up(header_size, (1 << sp_bits)) >> sp_bits;
+	for (i = align_up(header_size, (1UL << sp_bits)) >> sp_bits;
 	     i < SMALL_PAGES_PER_LARGE_PAGE;
 	     i++) {
 		ph = from_pgnum(head, i, sp_bits);
@@ -373,7 +404,8 @@ void alloc_init(void *pool, unsigned long poolsize)
 
 	/* Add the rest of the pages as large pages. */
 	i = SMALL_PAGES_PER_LARGE_PAGE;
-	while ((i << sp_bits) + (1 << lp_bits) <= poolsize) {
+	while ((i << sp_bits) + (1UL << lp_bits) <= poolsize) {
+		assert(i < MAX_SMALL_PAGES);
 		ph = from_pgnum(head, i, sp_bits);
 		ph->elements_used = 0;
 		add_large_page_to_freelist(head, ph, sp_bits);
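Most of the changes above are the same one-character fix repeated: a bare 1 is
a 32-bit int, so `1 << off` is undefined behaviour once off reaches the width
of int, and BITS_PER_LONG is 64 on LP64 builds.  The 1UL forms keep the shift
at unsigned long width.  A standalone sketch (not from alloc.c) of the case
the fix makes legal:

	#include <stdio.h>

	int main(void)
	{
		unsigned long bitmap = 0;
		unsigned int off = 40;	/* a legal bit index when BITS_PER_LONG is 64 */

		/* `1 << off` would be undefined here: the shift happens at
		 * int width.  `1UL << off` uses the full word. */
		bitmap |= 1UL << off;
		printf("bitmap = %#lx\n", bitmap);
		return 0;
	}

Two fixes in this hunk are different in kind.  The old
assert(header_size << (1UL << lp_bits)) was a typo'd shift that only tested
whether the result was nonzero; `<` makes it the intended bounds check.  And
small_page_bits() now returns fls(poolsize / MAX_SMALL_PAGES - 1), the
smallest power-of-two page size that covers the pool with at most
MAX_SMALL_PAGES small pages; the old `/ 2` form could undersize pages and
produce more of them than the fixed-size bookkeeping allows, which is the same
invariant the new assert(i < MAX_SMALL_PAGES) in alloc_init() guards.
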
@@ -409,16 +441,11 @@ static bool all_empty(struct header *head,
 	return true;
 }
 
-static u16 get_large_page(struct header *head, unsigned long poolsize,
-			  unsigned int sp_bits)
+static void recombine_small_pages(struct header *head, unsigned long poolsize,
+				  unsigned int sp_bits)
 {
-	unsigned int lp_bits, i, page;
-
-	lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
-
-	page = pop_from_list(head, &head->large_free_list, sp_bits);
-	if (likely(page))
-		return page;
+	unsigned long i;
+	unsigned int lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
 
 	/* Look for small pages to coalesce, after first large page. */
 	for (i = SMALL_PAGES_PER_LARGE_PAGE;
@@ -435,7 +462,19 @@ static u16 get_large_page(struct header *head, unsigned long poolsize,
 			add_large_page_to_freelist(head, ph, sp_bits);
 		}
 	}
-
+}
+
+static u16 get_large_page(struct header *head, unsigned long poolsize,
+			  unsigned int sp_bits)
+{
+	unsigned int page;
+
+	page = pop_from_list(head, &head->large_free_list, sp_bits);
+	if (likely(page))
+		return page;
+
+	recombine_small_pages(head, poolsize, sp_bits);
+
 	return pop_from_list(head, &head->large_free_list, sp_bits);
 }
 
@@ -450,6 +489,8 @@ static unsigned long break_up_large_page(struct header *head,
 	for (i = 1; i < SMALL_PAGES_PER_LARGE_PAGE; i++) {
 		struct page_header *ph = from_pgnum(head, lpage + i, sp_bits);
 
+		/* Initialize this: huge_alloc reads it. */
+		ph->elements_used = 0;
 		add_small_page_to_freelist(head, ph, sp_bits);
 	}
 
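Pulling the coalescing loop out into recombine_small_pages() lets the
huge-allocation path (next hunk) force recombination without popping a page;
get_large_page() keeps its old behaviour by trying the free list, recombining,
then trying once more.  The new ph->elements_used = 0 in break_up_large_page()
matters for the same reason as its own comment says: the huge allocator
decides whether a page is free by reading elements_used on every page it
walks, so a stale value would make a free page look busy.  A toy model of the
recombination test (can_recombine and small_free are illustrative; in alloc.c
the check is all_empty()'s walk of the small-page free list):

	#include <stdbool.h>
	#include <stdio.h>

	#define SMALL_PER_LARGE 32	/* 1 << BITS_FROM_SMALL_TO_LARGE_PAGE */

	static bool small_free[4 * SMALL_PER_LARGE];

	/* A large page can be reassembled only if every small page
	 * inside it is currently free. */
	static bool can_recombine(unsigned int large_pgnum)
	{
		unsigned int i, base = large_pgnum * SMALL_PER_LARGE;

		for (i = 0; i < SMALL_PER_LARGE; i++)
			if (!small_free[base + i])
				return false;
		return true;
	}

	int main(void)
	{
		unsigned int i;

		for (i = 0; i < 4 * SMALL_PER_LARGE; i++)
			small_free[i] = true;
		small_free[2 * SMALL_PER_LARGE + 7] = false;	/* one busy page */

		for (i = 0; i < 4; i++)
			printf("large page %u: %s\n", i,
			       can_recombine(i) ? "recombine" : "keep split");
		return 0;
	}
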
@@ -470,6 +511,193 @@ static u16 get_small_page(struct header *head, unsigned long poolsize,
 	return ret;
 }
 
+static bool huge_allocated(struct header *head, unsigned long offset)
+{
+	unsigned long i;
+	struct huge_alloc *ha;
+
+	for (i = head->huge; i; i = ha->next) {
+		ha = (void *)((char *)head + i);
+		if (ha->off <= offset && ha->off + ha->len > offset)
+			return true;
+	}
+	return false;
+}
+
+/* They want something really big.  Aim for contiguous pages (slow). */
+static COLD void *huge_alloc(void *pool, unsigned long poolsize,
+			     unsigned long size, unsigned long align)
+{
+	struct header *head = pool;
+	struct huge_alloc *ha;
+	unsigned long i, sp_bits, lp_bits, num, header_size;
+
+	sp_bits = small_page_bits(poolsize);
+	lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
+
+	/* Allocate tracking structure optimistically. */
+	ha = alloc_get(pool, poolsize, sizeof(*ha), ALIGNOF(*ha));
+	if (!ha)
+		return NULL;
+
+	/* First search for contiguous small pages... */
+	header_size = sizeof(*head) + sizeof(head->bs) * (max_bucket(lp_bits)-1);
+
+	num = 0;
+	for (i = (header_size + (1UL << sp_bits) - 1) >> sp_bits;
+	     i << sp_bits < poolsize;
+	     i++) {
+		struct page_header *pg;
+		unsigned long off = (i << sp_bits);
+
+		/* Skip over large pages. */
+		if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
+			i += (1UL << BITS_FROM_SMALL_TO_LARGE_PAGE)-1;
+			continue;
+		}
+
+		/* Does this page meet alignment requirements? */
+		if (!num && off % align != 0)
+			continue;
+
+		/* FIXME: This makes us O(n^2). */
+		if (huge_allocated(head, off)) {
+			num = 0;
+			continue;
+		}
+
+		pg = (struct page_header *)((char *)head + off);
+		if (pg->elements_used) {
+			num = 0;
+			continue;
+		}
+
+		num++;
+		if (num << sp_bits >= size) {
+			unsigned long pgnum;
+
+			/* Remove from free list. */
+			for (pgnum = i; pgnum > i - num; pgnum--) {
+				pg = from_pgnum(head, pgnum, sp_bits);
+				del_from_list(head,
+					      &head->small_free_list,
+					      pg, sp_bits);
+			}
+			ha->off = (i - num + 1) << sp_bits;
+			ha->len = num << sp_bits;
+			goto done;
+		}
+	}
+
+	/* Now search for large pages... */
+	recombine_small_pages(head, poolsize, sp_bits);
+
+	num = 0;
+	for (i = (header_size + (1UL << lp_bits) - 1) >> lp_bits;
+	     (i << lp_bits) < poolsize; i++) {
+		struct page_header *pg;
+		unsigned long off = (i << lp_bits);
+
+		/* Ignore small pages. */
+		if (!test_bit(head->pagesize, i))
+			continue;
+
+		/* Does this page meet alignment requirements? */
+		if (!num && off % align != 0)
+			continue;
+
+		/* FIXME: This makes us O(n^2). */
+		if (huge_allocated(head, off)) {
+			num = 0;
+			continue;
+		}
+
+		pg = (struct page_header *)((char *)head + off);
+		if (pg->elements_used) {
+			num = 0;
+			continue;
+		}
+
+		num++;
+		if (num << lp_bits >= size) {
+			unsigned long pgnum;
+
+			/* Remove from free list. */
+			for (pgnum = i; pgnum > i - num; pgnum--) {
+				pg = from_pgnum(head, pgnum, lp_bits);
+				del_from_list(head,
+					      &head->large_free_list,
+					      pg, sp_bits);
+			}
+			ha->off = (i - num + 1) << lp_bits;
+			ha->len = num << lp_bits;
+			goto done;
+		}
+	}
+
+	/* Unable to satisfy: free huge alloc structure. */
+	alloc_free(pool, poolsize, ha);
+	return NULL;
+
+done:
+	add_to_huge_list(pool, ha);
+	return (char *)pool + ha->off;
+}
+
+static COLD void
+huge_free(struct header *head, unsigned long poolsize, void *free)
+{
+	unsigned long i, off, pgnum, free_off = (char *)free - (char *)head;
+	unsigned int sp_bits, lp_bits;
+	struct huge_alloc *ha;
+
+	for (i = head->huge; i; i = ha->next) {
+		ha = (void *)((char *)head + i);
+		if (free_off == ha->off)
+			break;
+	}
+	assert(i);
+
+	/* Free up all the pages, delete and free ha */
+	sp_bits = small_page_bits(poolsize);
+	lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
+	pgnum = free_off >> sp_bits;
+
+	if (test_bit(head->pagesize, pgnum >> BITS_FROM_SMALL_TO_LARGE_PAGE)) {
+		for (off = ha->off;
+		     off < ha->off + ha->len;
+		     off += 1UL << lp_bits) {
+			add_large_page_to_freelist(head,
+						   (void *)((char *)head + off),
+						   sp_bits);
+		}
+	} else {
+		for (off = ha->off;
+		     off < ha->off + ha->len;
+		     off += 1UL << sp_bits) {
+			add_small_page_to_freelist(head,
+						   (void *)((char *)head + off),
+						   sp_bits);
+		}
+	}
+	del_from_huge(head, ha);
+	alloc_free(head, poolsize, ha);
+}
+
+static COLD unsigned long huge_size(struct header *head, void *p)
+{
+	unsigned long i, off = (char *)p - (char *)head;
+	struct huge_alloc *ha;
+
+	for (i = head->huge; i; i = ha->next) {
+		ha = (void *)((char *)head + i);
+		if (off == ha->off) {
+			return ha->len;
+		}
+	}
+	abort();
+}
+
 void *alloc_get(void *pool, unsigned long poolsize,
 		unsigned long size, unsigned long align)
 {
@@ -492,8 +720,7 @@ void *alloc_get(void *pool, unsigned long poolsize,
 
 	sp_bits = small_page_bits(poolsize);
 	if (bucket >= max_bucket(sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE)) {
-		/* FIXME: huge alloc. */
-		return NULL;
+		return huge_alloc(pool, poolsize, size, align);
 	}
 
 	bs = &head->bs[bucket];
@@ -560,6 +787,11 @@ void alloc_free(void *pool, unsigned long poolsize, void *free)
 	/* Step back to page header. */
 	ph = from_pgnum(head, pgnum, sp_bits);
 
+	if ((void *)ph == free) {
+		huge_free(head, poolsize, free);
+		return;
+	}
+
 	bs = &head->bs[ph->bucket];
 	pgoffset = offset - (pgnum << sp_bits)
 		- page_header_size(ph->bucket / INTER_BUCKET_SPACE,
@@ -605,6 +837,9 @@ unsigned long alloc_size(void *pool, unsigned long poolsize, void *p)
 	/* Step back to page header. */
 	ph = from_pgnum(head, pgnum, sp_bits);
 
+	if ((void *)ph == p)
+		return huge_size(head, p);
+
 	return bucket_to_size(ph->bucket);
 }
 
@@ -752,6 +987,7 @@ bool alloc_check(void *pool, unsigned long poolsize)
 	struct header *head = pool;
 	unsigned long prev, i, lp_bits, sp_bits, header_size, num_buckets;
 	struct page_header *ph;
+	struct huge_alloc *ha;
 	unsigned long pages[MAX_SMALL_PAGES / BITS_PER_LONG] = { 0 };
 
 	if (poolsize < MIN_USEFUL_SIZE)
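huge_alloc() is a first-fit scan for a contiguous, suitably aligned run of
free pages: first over small pages, then, after forcing recombination, over
large pages, resetting the run counter whenever it meets a page that is in use
or already covered by a huge allocation.  Hence the two O(n^2) FIXMEs: every
candidate page costs a walk of the huge list.  The dispatch on the free side
is cheap, though: a bucket allocation always sits after its page header, so a
pointer landing exactly on a page-header boundary can only be huge, which is
the (void *)ph == free test added to alloc_free() and alloc_size().  A toy
model of the run scan (find_run, page_busy and NUM_PAGES are illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_PAGES 16

	static bool page_busy[NUM_PAGES];

	/* First-fit: return the first page of a run of `need` free
	 * pages, or -1.  The run restarts whenever a busy page breaks
	 * it, just as huge_alloc() resets `num`. */
	static int find_run(unsigned int need)
	{
		unsigned int i, num = 0;

		for (i = 0; i < NUM_PAGES; i++) {
			if (page_busy[i]) {
				num = 0;
				continue;
			}
			if (++num == need)
				return i - num + 1;
		}
		return -1;
	}

	int main(void)
	{
		page_busy[3] = page_busy[9] = true;
		printf("run of 5 starts at page %d\n", find_run(5));	/* 4 */
		return 0;
	}
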
@@ -772,7 +1008,7 @@ bool alloc_check(void *pool, unsigned long poolsize)
 	prev = 0;
 	for (i = head->small_free_list; i; i = ph->next) {
 		/* Bad pointer? */
-		if (out_of_bounds(i, sp_bits, 1 << sp_bits, poolsize))
+		if (out_of_bounds(i, sp_bits, 1UL << sp_bits, poolsize))
 			return check_fail();
 		/* Large page? */
 		if (test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
@@ -792,7 +1028,7 @@ bool alloc_check(void *pool, unsigned long poolsize)
 	prev = 0;
 	for (i = head->large_free_list; i; i = ph->next) {
 		/* Bad pointer? */
-		if (out_of_bounds(i, sp_bits, 1 << lp_bits, poolsize))
+		if (out_of_bounds(i, sp_bits, 1UL << lp_bits, poolsize))
 			return check_fail();
 		/* Not large page? */
 		if (!test_bit(head->pagesize, i >> BITS_FROM_SMALL_TO_LARGE_PAGE))
@@ -819,6 +1055,46 @@ bool alloc_check(void *pool, unsigned long poolsize)
 			return false;
 	}
 
+	/* Check the huge alloc list. */
+	prev = 0;
+	for (i = head->huge; i; i = ha->next) {
+		unsigned long pgbits, j;
+
+		/* Bad pointer? */
+		if (i >= poolsize || i + sizeof(*ha) > poolsize)
+			return check_fail();
+		ha = (void *)((char *)head + i);
+
+		/* Check contents of ha. */
+		if (ha->off > poolsize || ha->off + ha->len > poolsize)
+			return check_fail();
+
+		/* Large or small page? */
+		pgbits = test_bit(head->pagesize, ha->off >> lp_bits)
+			? lp_bits : sp_bits;
+
+		/* Not page boundary? */
+		if ((ha->off % (1UL << pgbits)) != 0)
+			return check_fail();
+
+		/* Not page length? */
+		if ((ha->len % (1UL << pgbits)) != 0)
+			return check_fail();
+
+		/* Linked list corrupt? */
+		if (ha->prev != prev)
+			return check_fail();
+
+		for (j = ha->off; j < ha->off + ha->len; j += (1UL<<sp_bits)) {
+			/* Already seen this page? */
+			if (test_bit(pages, j >> sp_bits))
+				return check_fail();
+			set_bit(pages, j >> sp_bits);
+		}
+
+		prev = i;
+	}
+
 	/* Make sure every page accounted for. */
 	for (i = 0; i < poolsize >> sp_bits; i++) {
 		if (!test_bit(pages, i))
@@ -896,7 +1172,7 @@ static unsigned long visualize_bucket(FILE *out, struct header *head,
 	overhead += print_overhead(out, "page post-header alignments",
 				   (hdr_size - hdr_min) * num_pages, poolsize);
 	/* Between last element and end of page. */
-	page_size = (1 << sp_bits);
+	page_size = (1UL << sp_bits);
 	if (large_page_bucket(bucket, sp_bits))
 		page_size <<= BITS_FROM_SMALL_TO_LARGE_PAGE;
 
@@ -931,11 +1207,11 @@ void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
 	fprintf(out, "Large page size %lu, small page size %lu.\n",
 		1UL << lp_bits, 1UL << sp_bits);
 	overhead += print_overhead(out, "unused pool tail",
-				   poolsize % (1 << lp_bits), poolsize);
+				   poolsize % (1UL << lp_bits), poolsize);
 	fprintf(out, "Main header %lu bytes (%lu small pages).\n",
-		header_size, align_up(header_size, 1 << sp_bits) >> sp_bits);
+		header_size, align_up(header_size, 1UL << sp_bits) >> sp_bits);
 	overhead += print_overhead(out, "partial header page",
-				   align_up(header_size, 1 << sp_bits)
+				   align_up(header_size, 1UL << sp_bits)
 				   - header_size, poolsize);
 	/* Total large pages. */
 	i = count_bits(head->pagesize, poolsize >> lp_bits);
@@ -945,7 +1221,7 @@ void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
 		count, i, count ? 100.0 * count / i : 0.0);
 
 	/* Total small pages. */
-	i = (poolsize >> lp_bits) - i;
+	i = ((poolsize >> lp_bits) - i) << BITS_FROM_SMALL_TO_LARGE_PAGE;
 	/* Used pages */
 	count = i - count_list(head, head->small_free_list, sp_bits, NULL);
 	fprintf(out, "%lu/%lu small pages used (%.3g%%)\n",
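Seen from the public API, the change means requests beyond the largest bucket
now succeed instead of returning NULL, and the huge list is covered by
alloc_check()'s consistency pass.  A sketch of how the new path is exercised
(the signatures are the ones in this file; the pool and request sizes are
arbitrary):

	#include <stdio.h>
	#include <stdlib.h>
	#include <ccan/alloc/alloc.h>

	int main(void)
	{
		unsigned long poolsize = 32UL * 1024 * 1024;
		void *pool = malloc(poolsize);
		void *p;

		alloc_init(pool, poolsize);

		/* Far beyond the largest bucket: alloc_get() now hands
		 * this to huge_alloc() instead of failing. */
		p = alloc_get(pool, poolsize, 2UL * 1024 * 1024, 4096);
		if (p) {
			printf("huge block: %lu bytes\n",
			       alloc_size(pool, poolsize, p));
			alloc_free(pool, poolsize, p);
		}

		if (!alloc_check(pool, poolsize))
			fprintf(stderr, "pool corrupt!\n");

		free(pool);
		return 0;
	}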