diff --git a/alloc/alloc.c b/alloc/alloc.c
index 4502b938..c64af2df 100644
--- a/alloc/alloc.c
+++ b/alloc/alloc.c
@@ -35,8 +35,6 @@
  */
 struct metaheader
 {
-	/* Length (after this header). (FIXME: implied by page bits!). */
-	unsigned long metalen;
 	/* Next meta header, or 0 */
 	unsigned long next;
 	/* Bits start here. */
@@ -143,24 +141,24 @@ void alloc_init(void *pool, unsigned long poolsize)
 
 	mh = first_mheader(pool, poolsize);
 
-	/* len covers all page states, plus the metaheader. */
-	len = (char *)(mh + 1) - (char *)pool;
-
-	/* Mark all page states FREE */
+	/* Mark all page states FREE, and all of metaheader bitmap which takes
+	 * rest of first page. */
+	len = align_up(pool_offset(pool, mh + 1), getpagesize());
 	BUILD_ASSERT(FREE == 0);
 	memset(pool, 0, len);
 
-	/* metaheader len takes us up to next page boundary. */
-	mh->metalen = align_up(len, getpagesize()) - len;
-
 	/* Mark the pagestate and metadata page(s) allocated. */
 	set_page_state(pool, 0, TAKEN_START);
 	for (i = 1; i < div_up(len, getpagesize()); i++)
 		set_page_state(pool, i, TAKEN);
 }
 
-/* Two bits per element, representing page states.  Returns -1 on fail. */
-static long alloc_from_bitmap(uint8_t *bits, unsigned long elems,
-			      unsigned long want, unsigned long align)
+/* Two bits per element, representing page states.  Returns 0 on fail.
+ * off is used to allocate from subpage bitmaps, which use the first 2
+ * bits as the type, so the real bitmap is offset by 1. */
+static unsigned long alloc_from_bitmap(uint8_t *bits, unsigned long off,
+					unsigned long elems,
+					unsigned long want, unsigned long align)
 {
 	long i;
 	unsigned long free;
@@ -168,7 +166,7 @@ static long alloc_from_bitmap(uint8_t *bits, unsigned long elems,
 	free = 0;
 	/* We allocate from far end, to increase ability to expand metadata. */
 	for (i = elems - 1; i >= 0; i--) {
-		switch (get_bit_pair(bits, i)) {
+		switch (get_bit_pair(bits, off+i)) {
 		case FREE:
 			if (++free >= want) {
 				unsigned long j;
@@ -177,10 +175,10 @@
 				if (align && i % align)
 					continue;
 
+				set_bit_pair(bits, off+i, TAKEN_START);
 				for (j = i+1; j < i + want; j++)
-					set_bit_pair(bits, j, TAKEN);
-				set_bit_pair(bits, i, TAKEN_START);
-				return i;
+					set_bit_pair(bits, off+j, TAKEN);
+				return off+i;
 			}
 			break;
 		case SPECIAL:
@@ -191,44 +189,14 @@
 		}
 	}
 
-	return -1;
+	return 0;
 }
 
 static unsigned long alloc_get_pages(void *pool, unsigned long poolsize,
 				     unsigned long pages, unsigned long align)
 {
-	long i;
-	unsigned long free;
-
-	free = 0;
-	/* We allocate from far end, to increase ability to expand metadata. */
-	for (i = poolsize / getpagesize() - 1; i >= 0; i--) {
-		switch (get_page_state(pool, i)) {
-		case FREE:
-			if (++free >= pages) {
-				unsigned long j, addr;
-
-				addr = (unsigned long)pool + i * getpagesize();
-
-				/* They might ask for multi-page alignment.
-				 */
-				if (addr % align)
-					continue;
-
-				for (j = i+1; j < i + pages; j++)
-					set_page_state(pool, j, TAKEN);
-				set_page_state(pool, i, TAKEN_START);
-				return i;
-			}
-			break;
-		case SPECIAL:
-		case TAKEN_START:
-		case TAKEN:
-			free = 0;
-			break;
-		}
-	}
-
-	return 0;
+	return alloc_from_bitmap(pool, 0, poolsize / getpagesize(), pages,
+				 align / getpagesize());
 }
 
 /* Offset to metadata is at end of page. */
@@ -252,39 +220,59 @@ static unsigned long sub_page_alloc(void *pool, unsigned long page,
 				    unsigned long size, unsigned long align)
 {
 	uint8_t *bits = get_page_metadata(pool, page);
-	long i;
+	unsigned long i;
 
-	/* TAKEN at end means a bitwise alloc. */
-	assert(get_bit_pair(bits, getpagesize()/BITMAP_GRANULARITY-1) == TAKEN);
+	/* TAKEN at start means a bitwise alloc. */
+	assert(get_bit_pair(bits, 0) == BITMAP);
 
-	/* Our bits are the same as the page bits. */
-	i = alloc_from_bitmap(bits, SUBPAGE_METAOFF/BITMAP_GRANULARITY,
+	/* We use a standard bitmap, but offset because of that BITMAP
+	 * header. */
+	i = alloc_from_bitmap(bits, 1, SUBPAGE_METAOFF/BITMAP_GRANULARITY,
 			      div_up(size, BITMAP_GRANULARITY),
 			      align / BITMAP_GRANULARITY);
 
 	/* Can't allocate? */
-	if (i < 0)
+	if (i == 0)
 		return 0;
 
-	return page*getpagesize() + i*BITMAP_GRANULARITY;
+	/* i-1 because of the header. */
+	return page*getpagesize() + (i-1)*BITMAP_GRANULARITY;
+}
+
+/* We look at the page states to figure out where the allocation for this
+ * metadata ends. */
+static unsigned long get_metalen(void *pool, unsigned long poolsize,
+				 struct metaheader *mh)
+{
+	unsigned long i, first, pages = poolsize / getpagesize();
+
+	first = pool_offset(pool, mh + 1)/getpagesize();
+
+	for (i = first + 1; i < pages && get_page_state(pool,i) == TAKEN; i++);
+
+	return i * getpagesize() - pool_offset(pool, mh + 1);
 }
 
-static uint8_t *alloc_metaspace(struct metaheader *mh, unsigned long bytes)
+static uint8_t *alloc_metaspace(void *pool, unsigned long poolsize,
+				struct metaheader *mh, unsigned long bytes,
+				enum sub_metadata_type type)
 {
 	uint8_t *meta = (uint8_t *)(mh + 1);
-	unsigned long free = 0, len;
-	long i;
+	unsigned long free = 0, len, i, metalen;
+
+	metalen = get_metalen(pool, poolsize, mh);
 
 	/* TAKEN tags end a subpage alloc. */
-	for (i = mh->metalen * CHAR_BIT / BITS_PER_PAGE - 1; i >= 0; i -= len) {
+	for (i = 0; i < metalen * CHAR_BIT / BITS_PER_PAGE; i += len) {
 		switch (get_bit_pair(meta, i)) {
 		case FREE:
 			len = 1;
 			free++;
 			if (free == bytes * CHAR_BIT / BITS_PER_PAGE) {
-				/* TAKEN marks end of metablock. */
-				set_page_state(meta, i + free - 1, TAKEN);
-				return meta + i / (CHAR_BIT / BITS_PER_PAGE);
+				/* Mark this as a bitmap. */
+				set_bit_pair(meta, i - free + 1, type);
+				return meta + (i - free + 1)
+					/ (CHAR_BIT / BITS_PER_PAGE);
 			}
 			break;
 		case BITMAP:
@@ -302,13 +290,13 @@ static uint8_t *alloc_metaspace(struct metaheader *mh, unsigned long bytes)
 
 /* We need this many bytes of metadata. */
 static uint8_t *new_metadata(void *pool, unsigned long poolsize,
-			     unsigned long bytes)
+			     unsigned long bytes, enum sub_metadata_type type)
 {
 	struct metaheader *mh, *newmh;
 	unsigned long page;
 
 	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
-		uint8_t *meta = alloc_metaspace(mh, bytes);
+		uint8_t *meta = alloc_metaspace(pool, poolsize, mh, bytes,type);
 
 		if (meta)
 			return meta;
@@ -316,22 +304,22 @@ static uint8_t *new_metadata(void *pool, unsigned long poolsize,
 
 	/* No room for metadata?  Can we expand an existing one?
 	 */
 	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
-		/* It should end on a page boundary. */
 		unsigned long nextpage;
 
-		nextpage = pool_offset(pool, (char *)(mh + 1) + mh->metalen);
-		assert(nextpage % getpagesize() == 0);
+		/* We start on this page. */
+		nextpage = pool_offset(pool, (char *)(mh+1))/getpagesize();
+		/* Iterate through any other pages we own. */
+		while (get_page_state(pool, ++nextpage) == TAKEN);
 
 		/* Now, can we grab that page? */
-		if (get_page_state(pool, nextpage / getpagesize()) != FREE)
+		if (get_page_state(pool, nextpage) != FREE)
 			continue;
 
 		/* OK, expand metadata, do it again. */
-		set_page_state(pool, nextpage / getpagesize(), TAKEN);
+		set_page_state(pool, nextpage, TAKEN);
 		BUILD_ASSERT(FREE == 0);
-		memset((char *)pool + nextpage, 0, getpagesize());
-		mh->metalen += getpagesize();
-		return alloc_metaspace(mh, bytes);
+		memset((char *)pool + nextpage*getpagesize(), 0, getpagesize());
+		return alloc_metaspace(pool, poolsize, mh, bytes, type);
 	}
 
 	/* No metadata left at all? */
@@ -340,16 +328,15 @@ static uint8_t *new_metadata(void *pool, unsigned long poolsize,
 		return NULL;
 
 	newmh = (struct metaheader *)((char *)pool + page * getpagesize());
-	newmh->metalen = getpagesize() - sizeof(*mh);
 	BUILD_ASSERT(FREE == 0);
-	memset(newmh + 1, 0, newmh->metalen);
+	memset(newmh + 1, 0, getpagesize() - sizeof(*mh));
 
 	/* Sew it into linked list */
 	mh = first_mheader(pool,poolsize);
 	newmh->next = mh->next;
 	mh->next = pool_offset(pool, newmh);
 
-	return alloc_metaspace(newmh, bytes);
+	return alloc_metaspace(pool, poolsize, newmh, bytes, type);
 }
 
 static void alloc_free_pages(void *pool, unsigned long pagenum)
@@ -383,7 +370,7 @@ static unsigned long alloc_sub_page(void *pool, unsigned long poolsize,
 		return 0;
 
 	/* Get metadata for page. */
-	metadata = new_metadata(pool, poolsize, BITMAP_METALEN);
+	metadata = new_metadata(pool, poolsize, BITMAP_METALEN, BITMAP);
 	if (!metadata) {
 		alloc_free_pages(pool, i);
 		return 0;
@@ -412,12 +399,13 @@ static bool clean_empty_subpages(void *pool, unsigned long poolsize)
 			continue;
 
 		meta = get_page_metadata(pool, i);
-		for (j = 0; j < SUBPAGE_METAOFF/BITMAP_GRANULARITY; j++)
-			if (get_page_state(meta, j) != FREE)
+		/* Skip the header (first bit of metadata). */
+		for (j = 1; j < SUBPAGE_METAOFF/BITMAP_GRANULARITY+1; j++)
+			if (get_bit_pair(meta, j) != FREE)
 				break;
 
 		/* So, is this page totally empty? */
-		if (j == SUBPAGE_METAOFF/BITMAP_GRANULARITY) {
+		if (j == SUBPAGE_METAOFF/BITMAP_GRANULARITY+1) {
 			set_page_state(pool, i, FREE);
 			progress = true;
 		}
@@ -434,15 +422,16 @@ static bool clean_metadata(void *pool, unsigned long poolsize)
 	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
 		uint8_t *meta;
 		long i;
+		unsigned long metalen = get_metalen(pool, poolsize, mh);
 
 		meta = (uint8_t *)(mh + 1);
 		BUILD_ASSERT(FREE == 0);
-		for (i = mh->metalen - 1; i > 0; i--)
+		for (i = metalen - 1; i > 0; i--)
 			if (meta[i] != 0)
 				break;
 
 		/* Completely empty? */
-		if (prev_mh && i == mh->metalen) {
+		if (prev_mh && i == metalen) {
 			alloc_free_pages(pool,
 					 pool_offset(pool, mh)/getpagesize());
 			prev_mh->next = mh->next;
@@ -452,7 +441,7 @@ static bool clean_metadata(void *pool, unsigned long poolsize)
 			uint8_t *p;
 
 			/* Some pages at end are free?
 			 */
-			for (p = (uint8_t *)(mh+1)+mh->metalen - getpagesize();
+			for (p = (uint8_t *)(mh+1) + metalen - getpagesize();
 			     p > meta + i;
 			     p -= getpagesize()) {
 				set_page_state(pool,
@@ -518,10 +507,13 @@ static void subpage_free(void *pool, unsigned long pagenum, void *free)
 
 	off /= BITMAP_GRANULARITY;
 
-	set_page_state(metadata, off++, FREE);
+	/* Offset by one because first bit is used for header. */
+	off++;
+
+	set_bit_pair(metadata, off++, FREE);
 	while (off < SUBPAGE_METAOFF / BITMAP_GRANULARITY
-	       && get_page_state(metadata, off) == TAKEN)
-		set_page_state(metadata, off++, FREE);
+	       && get_bit_pair(metadata, off) == TAKEN)
+		set_bit_pair(metadata, off++, FREE);
 }
 
 void alloc_free(void *pool, unsigned long poolsize, void *free)
@@ -535,7 +527,7 @@
 	assert(poolsize >= MIN_SIZE);
 
 	mh = first_mheader(pool, poolsize);
-	assert((char *)free >= (char *)(mh + 1) + mh->metalen);
+	assert((char *)free >= (char *)(mh + 1));
 	assert((char *)pool + poolsize > (char *)free);
 
 	pagenum = pool_offset(pool, free) / getpagesize();
@@ -557,7 +549,8 @@
 		unsigned long start, end;
 
 		start = pool_offset(pool, mh);
-		end = pool_offset(pool, (char *)(mh+1) + mh->metalen);
+		end = pool_offset(pool, (char *)(mh+1)
+				  + get_metalen(pool, poolsize, mh));
 		if (page >= start/getpagesize() && page < end/getpagesize())
 			return true;
 	}
@@ -581,15 +574,15 @@
 	if (!is_metadata_page(pool, poolsize, *mhoff / getpagesize()))
 		return false;
 
-	/* Marker at end of subpage allocation is "taken" */
-	if (get_bit_pair((uint8_t *)pool + *mhoff,
-			 getpagesize()/BITMAP_GRANULARITY - 1) != TAKEN)
+	/* Header at start of subpage allocation */
+	if (get_bit_pair((uint8_t *)pool + *mhoff, 0) != BITMAP)
 		return false;
 
 	for (i = 0; i < SUBPAGE_METAOFF / BITMAP_GRANULARITY; i++) {
 		enum alloc_state state;
 
-		state = get_bit_pair((uint8_t *)pool + *mhoff, i);
+		/* +1 because header is the first byte. */
+		state = get_bit_pair((uint8_t *)pool + *mhoff, i+1);
 		switch (state) {
 		case SPECIAL:
 			return false;
@@ -627,7 +620,8 @@ bool alloc_check(void *pool, unsigned long poolsize)
 		if (start + sizeof(*mh) > poolsize)
 			return false;
 
-		end = pool_offset(pool, (char *)(mh+1) + mh->metalen);
+		end = pool_offset(pool, (char *)(mh+1)
+				  + get_metalen(pool, poolsize, mh));
 		if (end > poolsize)
 			return false;
 
@@ -695,21 +689,19 @@ void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
 
 	/* Now do each metadata page. */
 	for (; mh; mh = next_mheader(pool,mh)) {
-		unsigned long free = 0, subpageblocks = 0, len = 0;
+		unsigned long free = 0, subpageblocks = 0, len = 0, metalen;
 		uint8_t *meta = (uint8_t *)(mh + 1);
 
-		metadata_pages += (sizeof(*mh) + mh->metalen) / getpagesize();
+		metalen = get_metalen(pool, poolsize, mh);
+		metadata_pages += (sizeof(*mh) + metalen) / getpagesize();
 
-		/* TAKEN tags end a subpage alloc. */
-		for (i = mh->metalen * CHAR_BIT/BITS_PER_PAGE - 1;
-		     i >= 0;
-		     i -= len) {
+		for (i = 0; i < metalen * CHAR_BIT / BITS_PER_PAGE; i += len) {
 			switch (get_page_state(meta, i)) {
 			case FREE:
 				len = 1;
 				free++;
 				break;
-			case TAKEN:
+			case BITMAP:
 				/* Skip over this allocated part.
 				 */
 				len = BITMAP_METALEN * CHAR_BIT;
 				subpageblocks++;
@@ -721,7 +713,7 @@ void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
 		fprintf(out, "Metadata %lu-%lu: %lu free, %lu subpageblocks, %lu%% density\n",
 			pool_offset(pool, mh),
-			pool_offset(pool, (char *)(mh+1) + mh->metalen),
+			pool_offset(pool, (char *)(mh+1) + metalen),
 			free, subpageblocks,
 			subpageblocks * BITMAP_METALEN * 100
 			/ (free + subpageblocks * BITMAP_METALEN));
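
For readers without alloc.h to hand, the following is a minimal, self-contained sketch of the two-bits-per-element state bitmap that alloc_from_bitmap() scans above. It is not the ccan implementation: the helper names mirror get_bit_pair()/set_bit_pair() and the far-end scan, but the byte packing, the enum values other than FREE == 0 (which alloc.c relies on when it memsets the bitmap), and the simplified bitmap_alloc() (no alignment handling, no BITMAP-header offset) are assumptions made purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Illustrative states: only FREE == 0 matches a guarantee in alloc.c. */
enum alloc_state { FREE = 0, TAKEN, TAKEN_START, SPECIAL };

/* Each element is two bits, packed four to a byte. */
static enum alloc_state get_bit_pair(const uint8_t *bits, unsigned long i)
{
	return (bits[i / 4] >> ((i % 4) * 2)) & 3;
}

static void set_bit_pair(uint8_t *bits, unsigned long i, enum alloc_state s)
{
	bits[i / 4] &= ~(3 << ((i % 4) * 2));
	bits[i / 4] |= s << ((i % 4) * 2);
}

/* Scan from the far end for `want` contiguous FREE elements, mark the run
 * TAKEN_START then TAKEN (as alloc_from_bitmap does), and return its index,
 * or -1 if nothing fits. */
static long bitmap_alloc(uint8_t *bits, unsigned long elems, unsigned long want)
{
	unsigned long run = 0, j;
	long i;

	for (i = elems - 1; i >= 0; i--) {
		if (get_bit_pair(bits, i) != FREE) {
			run = 0;
			continue;
		}
		if (++run < want)
			continue;
		set_bit_pair(bits, i, TAKEN_START);
		for (j = i + 1; j < i + want; j++)
			set_bit_pair(bits, j, TAKEN);
		return i;
	}
	return -1;
}

int main(void)
{
	uint8_t bits[4] = { 0 };	/* 16 elements, all FREE */

	printf("first run starts at %ld\n", bitmap_alloc(bits, 16, 3));
	printf("second run starts at %ld\n", bitmap_alloc(bits, 16, 3));
	return 0;
}

Built as shown, the two calls report runs starting at elements 13 and 10 of the 16-element bitmap, i.e. the "allocate from the far end" behaviour the patch keeps; the patch's new `off` parameter only shifts this scan past the BITMAP type header that subpage bitmaps now carry in their first bit pair.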