X-Git-Url: http://git.ozlabs.org/?a=blobdiff_plain;f=alloc%2Falloc.c;h=4502b9389fd1f811ef21f63b41435f4b1c65f49d;hb=e6737e85a68414efcda00985e278490ffdc6fd2c;hp=796d64e07c6254ed46aaed89d54f0384653d0383;hpb=4f5988a8db7562a54be9125fb67895045a0f5528;p=ccan

diff --git a/alloc/alloc.c b/alloc/alloc.c
index 796d64e0..4502b938 100644
--- a/alloc/alloc.c
+++ b/alloc/alloc.c
@@ -35,32 +35,65 @@
  */
 struct metaheader
 {
-	/* Length (after this header). (FIXME: Could be in pages). */
+	/* Length (after this header). (FIXME: implied by page bits!). */
 	unsigned long metalen;
 	/* Next meta header, or 0 */
 	unsigned long next;
 	/* Bits start here. */
 };
 
-#define BITS_PER_PAGE 2
-/* FIXME: Don't use page states for bitblock. It's tacky and confusing. */
-enum page_state
+/* Assumes a is a power of two. */
+static unsigned long align_up(unsigned long x, unsigned long a)
+{
+	return (x + a - 1) & ~(a - 1);
+}
+
+static unsigned long div_up(unsigned long x, unsigned long a)
+{
+	return (x + a - 1) / a;
+}
+
+/* It turns out that we spend a lot of time dealing with bit pairs.
+ * These routines manipulate them.
+ */
+static uint8_t get_bit_pair(const uint8_t *bits, unsigned long index)
+{
+	return bits[index * 2 / CHAR_BIT] >> (index * 2 % CHAR_BIT) & 3;
+}
+
+static void set_bit_pair(uint8_t *bits, unsigned long index, uint8_t val)
+{
+	bits[index * 2 / CHAR_BIT] &= ~(3 << (index * 2 % CHAR_BIT));
+	bits[index * 2 / CHAR_BIT] |= (val << (index * 2 % CHAR_BIT));
+}
+
+/* This is used for page states and subpage allocations */
+enum alloc_state
 {
 	FREE,
 	TAKEN,
 	TAKEN_START,
-	SUBPAGE,
+	SPECIAL, /* Sub-page allocation for page states. */
 };
 
-/* Assumes a is a power of two. */
-static unsigned long align_up(unsigned long x, unsigned long a)
+/* The types for subpage metadata. */
+enum sub_metadata_type
 {
-	return (x + a - 1) & ~(a - 1);
+	/* FREE is same as alloc state */
+	BITMAP = 1,
+};
+
+/* Page states are represented by bitpairs, at the start of the pool. */
+#define BITS_PER_PAGE 2
+
+static enum alloc_state get_page_state(const void *pool, unsigned long page)
+{
+	return get_bit_pair(pool, page);
 }
 
-static unsigned long div_up(unsigned long x, unsigned long a)
+static void set_page_state(void *pool, unsigned long page, enum alloc_state s)
 {
-	return (x + a - 1) / a;
+	set_bit_pair(pool, page, s);
 }
 
 /* The offset of metadata for a subpage allocation is found at the end
@@ -69,24 +102,13 @@ static unsigned long div_up(unsigned long x, unsigned long a)
 
 /* This is the length of metadata in bits. It consists of two bits
  * for every BITMAP_GRANULARITY of usable bytes in the page, then two
- * bits for the TAKEN tailer.. */
+ * bits for the tailer.. */
 #define BITMAP_METABITLEN						\
 	((div_up(SUBPAGE_METAOFF, BITMAP_GRANULARITY) + 1) * BITS_PER_PAGE)
 
 /* This is the length in bytes. */
 #define BITMAP_METALEN (div_up(BITMAP_METABITLEN, CHAR_BIT))
 
-static enum page_state get_page_state(const uint8_t *bits, unsigned long page)
-{
-	return bits[page * 2 / CHAR_BIT] >> (page * 2 % CHAR_BIT) & 3;
-}
-
-static void set_page_state(uint8_t *bits, unsigned long page, enum page_state s)
-{
-	bits[page * 2 / CHAR_BIT] &= ~(3 << (page * 2 % CHAR_BIT));
-	bits[page * 2 / CHAR_BIT] |= ((uint8_t)s << (page * 2 % CHAR_BIT));
-}
-
 static struct metaheader *first_mheader(void *pool, unsigned long poolsize)
 {
 	unsigned int pagestatelen;
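
[Annotation: the bit-pair helpers introduced above are the core of this patch: every page state, and every entry in a sub-page bitmap, is a two-bit value packed four to a byte. The following standalone sketch shows the encoding in action; get_bit_pair() and set_bit_pair() are copied from the patch, while the enum and the main() harness are purely illustrative.]

	#include <assert.h>
	#include <limits.h>
	#include <stdint.h>

	enum alloc_state { FREE, TAKEN, TAKEN_START, SPECIAL };

	static uint8_t get_bit_pair(const uint8_t *bits, unsigned long index)
	{
		return bits[index * 2 / CHAR_BIT] >> (index * 2 % CHAR_BIT) & 3;
	}

	static void set_bit_pair(uint8_t *bits, unsigned long index, uint8_t val)
	{
		bits[index * 2 / CHAR_BIT] &= ~(3 << (index * 2 % CHAR_BIT));
		bits[index * 2 / CHAR_BIT] |= (val << (index * 2 % CHAR_BIT));
	}

	int main(void)
	{
		uint8_t states[2] = { 0 };	/* room for 8 two-bit states */

		/* Pair 5 lives in bits 2-3 of byte 1: 5*2/8 == 1, 5*2%8 == 2. */
		set_bit_pair(states, 5, TAKEN_START);
		set_bit_pair(states, 6, TAKEN);

		assert(get_bit_pair(states, 5) == TAKEN_START);
		assert(get_bit_pair(states, 6) == TAKEN);
		assert(get_bit_pair(states, 4) == FREE); /* untouched pairs read FREE */
		return 0;
	}

[Note that FREE is the all-zeroes pattern; the BUILD_ASSERT(FREE == 0) in clean_metadata() later in the patch relies on exactly this, so that empty metadata can be found by scanning raw bytes for zero.]
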
@@ -136,9 +158,9 @@ void alloc_init(void *pool, unsigned long poolsize)
 	set_page_state(pool, i, TAKEN);
 }
 
-/* Two bits per element, representing page states. Returns 0 on fail. */
-static unsigned long alloc_from_bitmap(uint8_t *bits, unsigned long elems,
-				       unsigned long want, unsigned long align)
+/* Two bits per element, representing page states. Returns -1 on fail. */
+static long alloc_from_bitmap(uint8_t *bits, unsigned long elems,
+			      unsigned long want, unsigned long align)
 {
 	long i;
 	unsigned long free;
@@ -146,7 +168,7 @@ static unsigned long alloc_from_bitmap(uint8_t *bits, unsigned long elems,
 	free = 0;
 	/* We allocate from far end, to increase ability to expand metadata. */
 	for (i = elems - 1; i >= 0; i--) {
-		switch (get_page_state(bits, i)) {
+		switch (get_bit_pair(bits, i)) {
 		case FREE:
 			if (++free >= want) {
 				unsigned long j;
@@ -156,12 +178,12 @@ static unsigned long alloc_from_bitmap(uint8_t *bits, unsigned long elems,
 					continue;
 
 				for (j = i+1; j < i + want; j++)
-					set_page_state(bits, j, TAKEN);
-				set_page_state(bits, i, TAKEN_START);
+					set_bit_pair(bits, j, TAKEN);
+				set_bit_pair(bits, i, TAKEN_START);
 				return i;
 			}
 			break;
-		case SUBPAGE:
+		case SPECIAL:
 		case TAKEN_START:
 		case TAKEN:
 			free = 0;
@@ -169,7 +191,7 @@
 		}
 	}
 
-	return 0;
+	return -1;
 }
 
 static unsigned long alloc_get_pages(void *pool, unsigned long poolsize,
@@ -198,7 +220,7 @@ static unsigned long alloc_get_pages(void *pool, unsigned long poolsize,
 			return i;
 		}
 		break;
-		case SUBPAGE:
+		case SPECIAL:
 		case TAKEN_START:
 		case TAKEN:
 			free = 0;
@@ -226,15 +248,14 @@ static void set_page_metadata(void *pool, unsigned long page, uint8_t *meta)
 	*metadata_off(pool, page) = meta - (uint8_t *)pool;
 }
 
-static void *sub_page_alloc(void *pool, unsigned long page,
-			    unsigned long size, unsigned long align)
+static unsigned long sub_page_alloc(void *pool, unsigned long page,
+				    unsigned long size, unsigned long align)
 {
 	uint8_t *bits = get_page_metadata(pool, page);
-	unsigned long i;
+	long i;
 
 	/* TAKEN at end means a bitwise alloc. */
-	assert(get_page_state(bits, getpagesize()/BITMAP_GRANULARITY - 1)
-	       == TAKEN);
+	assert(get_bit_pair(bits, getpagesize()/BITMAP_GRANULARITY-1) == TAKEN);
 
 	/* Our bits are the same as the page bits. */
 	i = alloc_from_bitmap(bits, SUBPAGE_METAOFF/BITMAP_GRANULARITY,
@@ -242,10 +263,10 @@ static void *sub_page_alloc(void *pool, unsigned long page,
 			      align / BITMAP_GRANULARITY);
 
 	/* Can't allocate? */
-	if (i == 0)
-		return NULL;
+	if (i < 0)
+		return 0;
 
-	return (char *)pool + page*getpagesize() + i*BITMAP_GRANULARITY;
+	return page*getpagesize() + i*BITMAP_GRANULARITY;
 }
 
 static uint8_t *alloc_metaspace(struct metaheader *mh, unsigned long bytes)
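
[Annotation: one subtle change above is that alloc_from_bitmap() now returns -1 on failure rather than 0. The same bitmap walker serves both page states and sub-page bitmaps ("Our bits are the same as the page bits"), and for a sub-page bitmap slot 0 is a perfectly valid allocation; only for whole pages did 0 safely mean failure, since page 0 always holds the pool's own state bits. A small harness follows, building on the enum, helpers and includes of the previous sketch (swap in this main()); the function body is the patch's, lightly condensed, and the bitmap sizes are invented for the example.]

	/* alloc_from_bitmap() as patched above, condensed: the three
	 * occupied states collapse into the default case. */
	static long alloc_from_bitmap(uint8_t *bits, unsigned long elems,
				      unsigned long want, unsigned long align)
	{
		long i;
		unsigned long free = 0;

		/* Walk from the far end, as the patch does. */
		for (i = elems - 1; i >= 0; i--) {
			switch (get_bit_pair(bits, i)) {
			case FREE:
				if (++free >= want) {
					unsigned long j;
					if (align && i % align != 0)
						continue;
					for (j = i + 1; j < i + want; j++)
						set_bit_pair(bits, j, TAKEN);
					set_bit_pair(bits, i, TAKEN_START);
					return i;
				}
				break;
			default:	/* SPECIAL, TAKEN_START, TAKEN */
				free = 0;
				break;
			}
		}
		return -1;
	}

	int main(void)
	{
		uint8_t bits[4] = { 0 };	/* 16 slots, all FREE */

		assert(alloc_from_bitmap(bits, 16, 4, 1) == 12); /* far end first */
		assert(alloc_from_bitmap(bits, 16, 4, 1) == 8);
		assert(alloc_from_bitmap(bits, 16, 9, 1) == -1); /* only 8 left */
		return 0;
	}
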
@@ -256,7 +277,7 @@
 
 	/* TAKEN tags end a subpage alloc. */
 	for (i = mh->metalen * CHAR_BIT / BITS_PER_PAGE - 1; i >= 0; i -= len) {
-		switch (get_page_state(meta, i)) {
+		switch (get_bit_pair(meta, i)) {
 		case FREE:
 			len = 1;
 			free++;
@@ -266,7 +287,7 @@
 				return meta + i / (CHAR_BIT / BITS_PER_PAGE);
 			}
 			break;
-		case TAKEN:
+		case BITMAP:
 			/* Skip over this allocated part. */
 			len = BITMAP_METALEN * CHAR_BIT / BITS_PER_PAGE;
 			free = 0;
@@ -326,7 +347,7 @@ static uint8_t *new_metadata(void *pool, unsigned long poolsize,
 	/* Sew it into linked list */
 	mh = first_mheader(pool,poolsize);
 	newmh->next = mh->next;
-	mh->next = (char *)newmh - (char *)pool;
+	mh->next = pool_offset(pool, newmh);
 
 	return alloc_metaspace(newmh, bytes);
 }
@@ -339,16 +360,16 @@
 	set_page_state(pool, pagenum, FREE);
 }
 
-static void *alloc_sub_page(void *pool, unsigned long poolsize,
-			    unsigned long size, unsigned long align)
+static unsigned long alloc_sub_page(void *pool, unsigned long poolsize,
+				    unsigned long size, unsigned long align)
 {
 	unsigned long i;
 	uint8_t *metadata;
 
 	/* Look for partial page. */
 	for (i = 0; i < poolsize / getpagesize(); i++) {
-		void *ret;
-		if (get_page_state(pool, i) != SUBPAGE)
+		unsigned long ret;
+		if (get_page_state(pool, i) != SPECIAL)
 			continue;
 
 		ret = sub_page_alloc(pool, i, size, align);
@@ -359,17 +380,17 @@ static void *alloc_sub_page(void *pool, unsigned long poolsize,
 	/* Create new SUBPAGE page. */
 	i = alloc_get_pages(pool, poolsize, 1, 1);
 	if (i == 0)
-		return NULL;
+		return 0;
 
 	/* Get metadata for page. */
 	metadata = new_metadata(pool, poolsize, BITMAP_METALEN);
 	if (!metadata) {
 		alloc_free_pages(pool, i);
-		return NULL;
+		return 0;
 	}
 
-	/* Actually, this is a SUBPAGE page now. */
-	set_page_state(pool, i, SUBPAGE);
+	/* Actually, this is a subpage page now. */
+	set_page_state(pool, i, SPECIAL);
 
 	/* Set metadata pointer for page. */
 	set_page_metadata(pool, i, metadata);
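
[Annotation: for a sense of scale, each SPECIAL page needs BITMAP_METALEN bytes of metadata, allocated by new_metadata() above and skipped over in alloc_metaspace()'s BITMAP case. SUBPAGE_METAOFF and BITMAP_GRANULARITY are defined outside this diff, so the concrete values below are illustrative stand-ins: a 4096-byte page, 32-byte granularity, and the metadata offset stored in the page's final unsigned long, as the metadata_off() usage suggests. Only the two macros are the patch's own.]

	#include <limits.h>
	#include <stdio.h>

	/* Illustrative stand-ins; the real constants live outside this diff. */
	#define PAGESIZE		4096UL
	#define SUBPAGE_METAOFF		(PAGESIZE - sizeof(unsigned long))
	#define BITMAP_GRANULARITY	32UL
	#define BITS_PER_PAGE		2

	static unsigned long div_up(unsigned long x, unsigned long a)
	{
		return (x + a - 1) / a;
	}

	/* The two macros from the patch, verbatim. */
	#define BITMAP_METABITLEN \
		((div_up(SUBPAGE_METAOFF, BITMAP_GRANULARITY) + 1) * BITS_PER_PAGE)
	#define BITMAP_METALEN (div_up(BITMAP_METABITLEN, CHAR_BIT))

	int main(void)
	{
		/* (4088/32 rounded up, plus one tail pair) * 2 = 258 bits -> 33 bytes */
		printf("bits: %lu, bytes: %lu\n", BITMAP_METABITLEN, BITMAP_METALEN);
		return 0;
	}
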
@@ -378,23 +399,111 @@
 	return sub_page_alloc(pool, i, size, align);
 }
 
+/* Returns true if we cleaned any pages. */
+static bool clean_empty_subpages(void *pool, unsigned long poolsize)
+{
+	unsigned long i;
+	bool progress = false;
+
+	for (i = 0; i < poolsize/getpagesize(); i++) {
+		uint8_t *meta;
+		unsigned int j;
+		if (get_page_state(pool, i) != SPECIAL)
+			continue;
+
+		meta = get_page_metadata(pool, i);
+		for (j = 0; j < SUBPAGE_METAOFF/BITMAP_GRANULARITY; j++)
+			if (get_page_state(meta, j) != FREE)
+				break;
+
+		/* So, is this page totally empty? */
+		if (j == SUBPAGE_METAOFF/BITMAP_GRANULARITY) {
+			set_page_state(pool, i, FREE);
+			progress = true;
+		}
+	}
+	return progress;
+}
+
+/* Returns true if we cleaned any pages. */
+static bool clean_metadata(void *pool, unsigned long poolsize)
+{
+	struct metaheader *mh, *prev_mh = NULL;
+	bool progress = false;
+
+	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
+		uint8_t *meta;
+		long i;
+
+		meta = (uint8_t *)(mh + 1);
+		BUILD_ASSERT(FREE == 0);
+		for (i = mh->metalen - 1; i > 0; i--)
+			if (meta[i] != 0)
+				break;
+
+		/* Completely empty? */
+		if (prev_mh && i == mh->metalen) {
+			alloc_free_pages(pool,
+					 pool_offset(pool, mh)/getpagesize());
+			prev_mh->next = mh->next;
+			mh = prev_mh;
+			progress = true;
+		} else {
+			uint8_t *p;
+
+			/* Some pages at end are free? */
+			for (p = (uint8_t *)(mh+1)+mh->metalen - getpagesize();
+			     p > meta + i;
+			     p -= getpagesize()) {
+				set_page_state(pool,
+					       pool_offset(pool, p)
+					       / getpagesize(),
+					       FREE);
+				progress = true;
+			}
+		}
+	}
+
+	return progress;
+}
+
 void *alloc_get(void *pool, unsigned long poolsize,
 		unsigned long size, unsigned long align)
 {
+	bool subpage_clean = false, metadata_clean = false;
+	unsigned long ret;
+
 	if (poolsize < MIN_SIZE)
 		return NULL;
 
-	/* Sub-page allocations have an overhead of 25%. */
-	if (size + size/4 >= getpagesize() || align >= getpagesize()) {
-		unsigned long ret, pages = div_up(size, getpagesize());
+again:
+	/* Sub-page allocations have an overhead of ~12%. */
+	if (size + size/8 >= getpagesize() || align >= getpagesize()) {
+		unsigned long pages = div_up(size, getpagesize());
 
-		ret = alloc_get_pages(pool, poolsize, pages, align);
-		if (ret == 0)
-			return NULL;
-		return (char *)pool + ret * getpagesize();
+		ret = alloc_get_pages(pool, poolsize, pages, align)
+			* getpagesize();
+	} else
+		ret = alloc_sub_page(pool, poolsize, size, align);
+
+	if (ret != 0)
+		return (char *)pool + ret;
+
+	/* Allocation failed: garbage collection. */
+	if (!subpage_clean) {
+		subpage_clean = true;
+		if (clean_empty_subpages(pool, poolsize))
+			goto again;
+	}
+
+	if (!metadata_clean) {
+		metadata_clean = true;
+		if (clean_metadata(pool, poolsize))
+			goto again;
 	}
 
-	return alloc_sub_page(pool, poolsize, size, align);
+	/* FIXME: Compact metadata? */
+	return NULL;
 }
 
 static void subpage_free(void *pool, unsigned long pagenum, void *free)
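
[Annotation: the rewritten alloc_get() above retries after each garbage-collection pass, and each pass runs at most once per call. Distilled to its control flow, with a hypothetical try_alloc() standing in for the two allocation branches and trivial stubs so the sketch compiles and links standalone:]

	#include <stdbool.h>
	#include <stddef.h>

	/* Trivial stubs standing in for the patch's allocators and sweepers. */
	static unsigned long try_alloc(size_t size) { (void)size; return 0; }
	static bool clean_empty_subpages(void) { return false; }
	static bool clean_metadata(void) { return false; }

	void *alloc_get_sketch(char *pool, size_t size)
	{
		bool subpage_clean = false, metadata_clean = false;
		unsigned long off;

	again:
		off = try_alloc(size);		/* 0 doubles as "no space" */
		if (off != 0)
			return pool + off;

		/* Retry only when a sweep actually reclaimed something. */
		if (!subpage_clean) {
			subpage_clean = true;
			if (clean_empty_subpages())
				goto again;
		}
		if (!metadata_clean) {
			metadata_clean = true;
			if (clean_metadata())
				goto again;
		}
		return NULL;	/* both sweeps tried; genuinely out of space */
	}

[Offset 0 can stand for failure at this level because the pool's page-state bitpairs occupy the start of page 0, so no allocation is ever handed out at offset 0.]
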
@@ -413,8 +522,6 @@ static void subpage_free(void *pool, unsigned long pagenum, void *free)
 	while (off < SUBPAGE_METAOFF / BITMAP_GRANULARITY
 	       && get_page_state(metadata, off) == TAKEN)
 		set_page_state(metadata, off++, FREE);
-
-	/* FIXME: If whole page free, free page and metadata. */
 }
 
 void alloc_free(void *pool, unsigned long poolsize, void *free)
@@ -433,7 +540,7 @@ void alloc_free(void *pool, unsigned long poolsize, void *free)
 
 	pagenum = pool_offset(pool, free) / getpagesize();
 
-	if (get_page_state(pool, pagenum) == SUBPAGE)
+	if (get_page_state(pool, pagenum) == SPECIAL)
 		subpage_free(pool, pagenum, free);
 	else {
 		assert((unsigned long)free % getpagesize() == 0);
@@ -462,7 +569,7 @@ static bool check_subpage(void *pool, unsigned long poolsize,
 {
 	unsigned long *mhoff = metadata_off(pool, page);
 	unsigned int i;
-	enum page_state last_state = FREE;
+	enum alloc_state last_state = FREE;
 
 	if (*mhoff + sizeof(struct metaheader) > poolsize)
 		return false;
@@ -475,16 +582,16 @@ static bool check_subpage(void *pool, unsigned long poolsize,
 		return false;
 
 	/* Marker at end of subpage allocation is "taken" */
-	if (get_page_state((uint8_t *)pool + *mhoff,
-			   getpagesize()/BITMAP_GRANULARITY - 1) != TAKEN)
+	if (get_bit_pair((uint8_t *)pool + *mhoff,
+			 getpagesize()/BITMAP_GRANULARITY - 1) != TAKEN)
 		return false;
 
 	for (i = 0; i < SUBPAGE_METAOFF / BITMAP_GRANULARITY; i++) {
-		enum page_state state;
+		enum alloc_state state;
 
-		state = get_page_state((uint8_t *)pool + *mhoff, i);
+		state = get_bit_pair((uint8_t *)pool + *mhoff, i);
 		switch (state) {
-		case SUBPAGE:
+		case SPECIAL:
 			return false;
 		case TAKEN:
 			if (last_state == FREE)
@@ -502,7 +609,7 @@ bool alloc_check(void *pool, unsigned long poolsize)
 {
 	unsigned long i;
 	struct metaheader *mh;
-	enum page_state last_state = FREE;
+	enum alloc_state last_state = FREE;
 	bool was_metadata = false;
 
 	if (poolsize < MIN_SIZE)
@@ -535,7 +642,7 @@ bool alloc_check(void *pool, unsigned long poolsize)
 	}
 
 	for (i = 0; i < poolsize / getpagesize(); i++) {
-		enum page_state state = get_page_state(pool, i);
+		enum alloc_state state = get_page_state(pool, i);
 		bool is_metadata = is_metadata_page(pool, poolsize,i);
 
 		switch (state) {
@@ -552,7 +659,7 @@ bool alloc_check(void *pool, unsigned long poolsize)
 			if (is_metadata != was_metadata)
 				return false;
 			break;
-		case SUBPAGE:
+		case SPECIAL:
 			/* Check metadata pointer etc. */
 			if (!check_subpage(pool, poolsize, i))
 				return false;
@@ -630,7 +737,7 @@ void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
 	for (i = 0; i < poolsize / getpagesize(); i++) {
 		uint8_t *meta;
 		unsigned int j;
-		if (get_page_state(pool, i) != SUBPAGE)
+		if (get_page_state(pool, i) != SPECIAL)
 			continue;
 
 		memset(count, 0, sizeof(count));
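
[Annotation: taking the patch as a whole, the public entry points keep the signatures visible in the context lines above; what changed is the internal bit-pair encoding and alloc_get()'s behaviour on failure, which now garbage-collects before giving up. A usage sketch against those declarations (pool size chosen arbitrarily, well above MIN_SIZE; link against ccan's alloc module):]

	#include <assert.h>
	#include <stdbool.h>
	#include <stdlib.h>
	#include <unistd.h>		/* getpagesize() */

	/* Declarations as used in this file (normally from the alloc header). */
	void alloc_init(void *pool, unsigned long poolsize);
	void *alloc_get(void *pool, unsigned long poolsize,
			unsigned long size, unsigned long align);
	void alloc_free(void *pool, unsigned long poolsize, void *p);
	bool alloc_check(void *pool, unsigned long poolsize);

	int main(void)
	{
		unsigned long poolsize = 64 * getpagesize();
		void *pool = malloc(poolsize);
		void *p;

		alloc_init(pool, poolsize);

		/* Well under a page, so this lands in a SPECIAL (sub-page) page. */
		p = alloc_get(pool, poolsize, 100, 1);
		assert(p != NULL);
		assert(alloc_check(pool, poolsize));

		alloc_free(pool, poolsize, p);
		assert(alloc_check(pool, poolsize));

		free(pool);
		return 0;
	}
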