*/
struct metaheader
{
- /* Length (after this header). (FIXME: Could be in pages). */
+ /* Length (after this header). (FIXME: implied by page bits!). */
unsigned long metalen;
/* Next meta header, or 0 */
unsigned long next;
/* Bits start here. */
};
-#define BITS_PER_PAGE 2
-/* FIXME: Don't use page states for bitblock. It's tacky and confusing. */
-enum page_state
+/* Assumes a is a power of two. */
+static unsigned long align_up(unsigned long x, unsigned long a)
+{
+ return (x + a - 1) & ~(a - 1);
+}
+
+static unsigned long div_up(unsigned long x, unsigned long a)
+{
+ return (x + a - 1) / a;
+}
+
+/* It turns out that we spend a lot of time dealing with bit pairs.
+ * These routines manipulate them.
+ */
+static uint8_t get_bit_pair(const uint8_t *bits, unsigned long index)
+{
+ return bits[index * 2 / CHAR_BIT] >> (index * 2 % CHAR_BIT) & 3;
+}
+
+static void set_bit_pair(uint8_t *bits, unsigned long index, uint8_t val)
+{
+ bits[index * 2 / CHAR_BIT] &= ~(3 << (index * 2 % CHAR_BIT));
+ bits[index * 2 / CHAR_BIT] |= (val << (index * 2 % CHAR_BIT));
+}
+
+/* This is used for page states and subpage allocations */
+enum alloc_state
{
FREE,
TAKEN,
TAKEN_START,
- SUBPAGE,
+ SPECIAL, /* Sub-page allocation for page states. */
};
-/* Assumes a is a power of two. */
-static unsigned long align_up(unsigned long x, unsigned long a)
+/* The types for subpage metadata. */
+enum sub_metadata_type
{
- return (x + a - 1) & ~(a - 1);
+ /* FREE is the same value as in enum alloc_state. */
+ BITMAP = 1,
+};
+
+/* Page states are represented by bitpairs, at the start of the pool. */
+#define BITS_PER_PAGE 2
+
+static enum alloc_state get_page_state(const void *pool, unsigned long page)
+{
+ return get_bit_pair(pool, page);
}
-static unsigned long div_up(unsigned long x, unsigned long a)
+static void set_page_state(void *pool, unsigned long page, enum alloc_state s)
{
- return (x + a - 1) / a;
+ set_bit_pair(pool, page, s);
}
/* The offset of metadata for a subpage allocation is found at the end
/* This is the length of metadata in bits. It consists of two bits
* for every BITMAP_GRANULARITY of usable bytes in the page, then two
- * bits for the TAKEN tailer.. */
+ * bits for the tailer. */
#define BITMAP_METABITLEN \
((div_up(SUBPAGE_METAOFF, BITMAP_GRANULARITY) + 1) * BITS_PER_PAGE)
/* This is the length in bytes. */
#define BITMAP_METALEN (div_up(BITMAP_METABITLEN, CHAR_BIT))
-static enum page_state get_page_state(const uint8_t *bits, unsigned long page)
-{
- return bits[page * 2 / CHAR_BIT] >> (page * 2 % CHAR_BIT) & 3;
-}
-
-static void set_page_state(uint8_t *bits, unsigned long page, enum page_state s)
-{
- bits[page * 2 / CHAR_BIT] &= ~(3 << (page * 2 % CHAR_BIT));
- bits[page * 2 / CHAR_BIT] |= ((uint8_t)s << (page * 2 % CHAR_BIT));
-}
-
static struct metaheader *first_mheader(void *pool, unsigned long poolsize)
{
unsigned int pagestatelen;
mh = first_mheader(pool, poolsize);
- /* len covers all page states, plus the metaheader. */
- len = (char *)(mh + 1) - (char *)pool;
- /* Mark all page states FREE */
+ /* Mark all page states FREE, and all of the metaheader bitmap, which
+ * takes up the rest of the first page. */
+ len = align_up(pool_offset(pool, mh + 1), getpagesize());
BUILD_ASSERT(FREE == 0);
memset(pool, 0, len);
- /* metaheader len takes us up to next page boundary. */
- mh->metalen = align_up(len, getpagesize()) - len;
+ /* Set up metalen */
+ mh->metalen = len - pool_offset(pool, mh + 1);
/* Mark the pagestate and metadata page(s) allocated. */
set_page_state(pool, 0, TAKEN_START);
set_page_state(pool, i, TAKEN);
}
-/* Two bits per element, representing page states. Returns 0 on fail. */
-static unsigned long alloc_from_bitmap(uint8_t *bits, unsigned long elems,
+/* Two bits per element, representing page states. Returns 0 on fail.
+ * off is used to allocate from subpage bitmaps, which use the first 2
+ * bits as the type, so the real bitmap is offset by 1. */
+static unsigned long alloc_from_bitmap(uint8_t *bits, unsigned long off,
+ unsigned long elems,
unsigned long want, unsigned long align)
{
long i;
free = 0;
/* We allocate from far end, to increase ability to expand metadata. */
for (i = elems - 1; i >= 0; i--) {
- switch (get_page_state(bits, i)) {
+ switch (get_bit_pair(bits, off+i)) {
case FREE:
if (++free >= want) {
unsigned long j;
if (align && i % align)
continue;
+ set_bit_pair(bits, off+i, TAKEN_START);
for (j = i+1; j < i + want; j++)
- set_page_state(bits, j, TAKEN);
- set_page_state(bits, i, TAKEN_START);
- return i;
+ set_bit_pair(bits, off+j, TAKEN);
+ return off+i;
}
break;
- case SUBPAGE:
+ case SPECIAL:
case TAKEN_START:
case TAKEN:
free = 0;
static unsigned long alloc_get_pages(void *pool, unsigned long poolsize,
unsigned long pages, unsigned long align)
{
- long i;
- unsigned long free;
-
- free = 0;
- /* We allocate from far end, to increase ability to expand metadata. */
- for (i = poolsize / getpagesize() - 1; i >= 0; i--) {
- switch (get_page_state(pool, i)) {
- case FREE:
- if (++free >= pages) {
- unsigned long j, addr;
-
- addr = (unsigned long)pool + i * getpagesize();
-
- /* They might ask for multi-page alignment. */
- if (addr % align)
- continue;
-
- for (j = i+1; j < i + pages; j++)
- set_page_state(pool, j, TAKEN);
- set_page_state(pool, i, TAKEN_START);
- return i;
- }
- break;
- case SUBPAGE:
- case TAKEN_START:
- case TAKEN:
- free = 0;
- break;
- }
- }
-
- return 0;
+ return alloc_from_bitmap(pool, 0, poolsize / getpagesize(), pages,
+ align / getpagesize());
}
/* Offset to metadata is at end of page. */
*metadata_off(pool, page) = meta - (uint8_t *)pool;
}
-static void *sub_page_alloc(void *pool, unsigned long page,
- unsigned long size, unsigned long align)
+static unsigned long sub_page_alloc(void *pool, unsigned long page,
+ unsigned long size, unsigned long align)
{
uint8_t *bits = get_page_metadata(pool, page);
unsigned long i;
- /* TAKEN at end means a bitwise alloc. */
- assert(get_page_state(bits, getpagesize()/BITMAP_GRANULARITY - 1)
- == TAKEN);
+ /* A BITMAP header at the start means a bitwise alloc. */
+ assert(get_bit_pair(bits, 0) == BITMAP);
- /* Our bits are the same as the page bits. */
- i = alloc_from_bitmap(bits, SUBPAGE_METAOFF/BITMAP_GRANULARITY,
+ /* We use a standard bitmap, but offset because of that BITMAP
+ * header. */
+ i = alloc_from_bitmap(bits, 1, SUBPAGE_METAOFF/BITMAP_GRANULARITY,
div_up(size, BITMAP_GRANULARITY),
align / BITMAP_GRANULARITY);
/* Can't allocate? */
if (i == 0)
- return NULL;
+ return 0;
- return (char *)pool + page*getpagesize() + i*BITMAP_GRANULARITY;
+ /* i-1 because of the header. */
+ return page*getpagesize() + (i-1)*BITMAP_GRANULARITY;
}
-static uint8_t *alloc_metaspace(struct metaheader *mh, unsigned long bytes)
+static uint8_t *alloc_metaspace(struct metaheader *mh, unsigned long bytes,
+ enum sub_metadata_type type)
{
uint8_t *meta = (uint8_t *)(mh + 1);
unsigned long free = 0, len;
- long i;
+ unsigned long i;
/* TAKEN tags end a subpage alloc. */
- for (i = mh->metalen * CHAR_BIT / BITS_PER_PAGE - 1; i >= 0; i -= len) {
- switch (get_page_state(meta, i)) {
+ for (i = 0; i < mh->metalen * CHAR_BIT / BITS_PER_PAGE; i += len) {
+ switch (get_bit_pair(meta, i)) {
case FREE:
len = 1;
free++;
if (free == bytes * CHAR_BIT / BITS_PER_PAGE) {
- /* TAKEN marks end of metablock. */
- set_page_state(meta, i + free - 1, TAKEN);
- return meta + i / (CHAR_BIT / BITS_PER_PAGE);
+ /* Mark this as a bitmap. */
+ set_bit_pair(meta, i - free + 1, type);
+ return meta + (i - free + 1)
+ / (CHAR_BIT / BITS_PER_PAGE);
}
break;
- case TAKEN:
+ case BITMAP:
/* Skip over this allocated part. */
len = BITMAP_METALEN * CHAR_BIT / BITS_PER_PAGE;
free = 0;
/* We need this many bytes of metadata. */
static uint8_t *new_metadata(void *pool, unsigned long poolsize,
- unsigned long bytes)
+ unsigned long bytes, enum sub_metadata_type type)
{
struct metaheader *mh, *newmh;
unsigned long page;
for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
- uint8_t *meta = alloc_metaspace(mh, bytes);
+ uint8_t *meta = alloc_metaspace(mh, bytes, type);
if (meta)
return meta;
BUILD_ASSERT(FREE == 0);
memset((char *)pool + nextpage, 0, getpagesize());
mh->metalen += getpagesize();
- return alloc_metaspace(mh, bytes);
+ return alloc_metaspace(mh, bytes, type);
}
/* No metadata left at all? */
/* Sew it into linked list */
mh = first_mheader(pool,poolsize);
newmh->next = mh->next;
- mh->next = (char *)newmh - (char *)pool;
+ mh->next = pool_offset(pool, newmh);
- return alloc_metaspace(newmh, bytes);
+ return alloc_metaspace(newmh, bytes, type);
}
static void alloc_free_pages(void *pool, unsigned long pagenum)
set_page_state(pool, pagenum, FREE);
}
-static void *alloc_sub_page(void *pool, unsigned long poolsize,
- unsigned long size, unsigned long align)
+static unsigned long alloc_sub_page(void *pool, unsigned long poolsize,
+ unsigned long size, unsigned long align)
{
unsigned long i;
uint8_t *metadata;
/* Look for partial page. */
for (i = 0; i < poolsize / getpagesize(); i++) {
- void *ret;
- if (get_page_state(pool, i) != SUBPAGE)
+ unsigned long ret;
+ if (get_page_state(pool, i) != SPECIAL)
continue;
ret = sub_page_alloc(pool, i, size, align);
/* Create new SUBPAGE page. */
i = alloc_get_pages(pool, poolsize, 1, 1);
if (i == 0)
- return NULL;
+ return 0;
/* Get metadata for page. */
- metadata = new_metadata(pool, poolsize, BITMAP_METALEN);
+ metadata = new_metadata(pool, poolsize, BITMAP_METALEN, BITMAP);
if (!metadata) {
alloc_free_pages(pool, i);
- return NULL;
+ return 0;
}
- /* Actually, this is a SUBPAGE page now. */
- set_page_state(pool, i, SUBPAGE);
+ /* Actually, this is a subpage page now. */
+ set_page_state(pool, i, SPECIAL);
/* Set metadata pointer for page. */
set_page_metadata(pool, i, metadata);
return sub_page_alloc(pool, i, size, align);
}
+/* Returns true if we cleaned any pages. */
+static bool clean_empty_subpages(void *pool, unsigned long poolsize)
+{
+ unsigned long i;
+ bool progress = false;
+
+ for (i = 0; i < poolsize/getpagesize(); i++) {
+ uint8_t *meta;
+ unsigned int j;
+ if (get_page_state(pool, i) != SPECIAL)
+ continue;
+
+ meta = get_page_metadata(pool, i);
+ /* Skip the header (first bit pair of metadata). */
+ for (j = 1; j < SUBPAGE_METAOFF/BITMAP_GRANULARITY+1; j++)
+ if (get_bit_pair(meta, j) != FREE)
+ break;
+
+ /* So, is this page totally empty? */
+ if (j == SUBPAGE_METAOFF/BITMAP_GRANULARITY+1) {
+ set_page_state(pool, i, FREE);
+ progress = true;
+ }
+ }
+ return progress;
+}
+
+/* Returns true if we cleaned any pages. */
+static bool clean_metadata(void *pool, unsigned long poolsize)
+{
+ struct metaheader *mh, *prev_mh = NULL;
+ bool progress = false;
+
+ for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
+ uint8_t *meta;
+ long i;
+
+ meta = (uint8_t *)(mh + 1);
+ BUILD_ASSERT(FREE == 0);
+ for (i = mh->metalen - 1; i > 0; i--)
+ if (meta[i] != 0)
+ break;
+
+ /* Completely empty? */
+ if (prev_mh && i == mh->metalen) {
+ alloc_free_pages(pool,
+ pool_offset(pool, mh)/getpagesize());
+ prev_mh->next = mh->next;
+ mh = prev_mh;
+ progress = true;
+ } else {
+ uint8_t *p;
+
+ /* Some pages at end are free? */
+ for (p = (uint8_t *)(mh+1)+mh->metalen - getpagesize();
+ p > meta + i;
+ p -= getpagesize()) {
+ set_page_state(pool,
+ pool_offset(pool, p)
+ / getpagesize(),
+ FREE);
+ progress = true;
+ }
+ }
+ }
+
+ return progress;
+}
+
void *alloc_get(void *pool, unsigned long poolsize,
unsigned long size, unsigned long align)
{
+ bool subpage_clean = false, metadata_clean = false;
+ unsigned long ret;
+
if (poolsize < MIN_SIZE)
return NULL;
- /* Sub-page allocations have an overhead of 25%. */
- if (size + size/4 >= getpagesize() || align >= getpagesize()) {
- unsigned long ret, pages = div_up(size, getpagesize());
+again:
+ /* Sub-page allocations have an overhead of ~12%. */
+ if (size + size/8 >= getpagesize() || align >= getpagesize()) {
+ unsigned long pages = div_up(size, getpagesize());
- ret = alloc_get_pages(pool, poolsize, pages, align);
- if (ret == 0)
- return NULL;
- return (char *)pool + ret * getpagesize();
+ ret = alloc_get_pages(pool, poolsize, pages, align)
+ * getpagesize();
+ } else
+ ret = alloc_sub_page(pool, poolsize, size, align);
+
+ if (ret != 0)
+ return (char *)pool + ret;
+
+ /* Allocation failed: garbage collection. */
+ if (!subpage_clean) {
+ subpage_clean = true;
+ if (clean_empty_subpages(pool, poolsize))
+ goto again;
+ }
+
+ if (!metadata_clean) {
+ metadata_clean = true;
+ if (clean_metadata(pool, poolsize))
+ goto again;
}
- return alloc_sub_page(pool, poolsize, size, align);
+ /* FIXME: Compact metadata? */
+ return NULL;
}
static void subpage_free(void *pool, unsigned long pagenum, void *free)
off /= BITMAP_GRANULARITY;
- set_page_state(metadata, off++, FREE);
- while (off < SUBPAGE_METAOFF / BITMAP_GRANULARITY
- && get_page_state(metadata, off) == TAKEN)
- set_page_state(metadata, off++, FREE);
+ /* Offset by one because first bit is used for header. */
+ off++;
- /* FIXME: If whole page free, free page and metadata. */
+ set_bit_pair(metadata, off++, FREE);
+ while (off < SUBPAGE_METAOFF / BITMAP_GRANULARITY
+ && get_bit_pair(metadata, off) == TAKEN)
+ set_bit_pair(metadata, off++, FREE);
}
void alloc_free(void *pool, unsigned long poolsize, void *free)
pagenum = pool_offset(pool, free) / getpagesize();
- if (get_page_state(pool, pagenum) == SUBPAGE)
+ if (get_page_state(pool, pagenum) == SPECIAL)
subpage_free(pool, pagenum, free);
else {
assert((unsigned long)free % getpagesize() == 0);
{
unsigned long *mhoff = metadata_off(pool, page);
unsigned int i;
- enum page_state last_state = FREE;
+ enum alloc_state last_state = FREE;
if (*mhoff + sizeof(struct metaheader) > poolsize)
return false;
if (!is_metadata_page(pool, poolsize, *mhoff / getpagesize()))
return false;
- /* Marker at end of subpage allocation is "taken" */
- if (get_page_state((uint8_t *)pool + *mhoff,
- getpagesize()/BITMAP_GRANULARITY - 1) != TAKEN)
+ /* Header at start of subpage allocation */
+ if (get_bit_pair((uint8_t *)pool + *mhoff, 0) != BITMAP)
return false;
for (i = 0; i < SUBPAGE_METAOFF / BITMAP_GRANULARITY; i++) {
- enum page_state state;
+ enum alloc_state state;
- state = get_page_state((uint8_t *)pool + *mhoff, i);
+ /* +1 because the header is the first bit pair. */
+ state = get_bit_pair((uint8_t *)pool + *mhoff, i+1);
switch (state) {
- case SUBPAGE:
+ case SPECIAL:
return false;
case TAKEN:
if (last_state == FREE)
{
unsigned long i;
struct metaheader *mh;
- enum page_state last_state = FREE;
+ enum alloc_state last_state = FREE;
bool was_metadata = false;
if (poolsize < MIN_SIZE)
}
for (i = 0; i < poolsize / getpagesize(); i++) {
- enum page_state state = get_page_state(pool, i);
+ enum alloc_state state = get_page_state(pool, i);
bool is_metadata = is_metadata_page(pool, poolsize,i);
switch (state) {
if (is_metadata != was_metadata)
return false;
break;
- case SUBPAGE:
+ case SPECIAL:
/* Check metadata pointer etc. */
if (!check_subpage(pool, poolsize, i))
return false;
metadata_pages += (sizeof(*mh) + mh->metalen) / getpagesize();
- /* TAKEN tags end a subpage alloc. */
- for (i = mh->metalen * CHAR_BIT/BITS_PER_PAGE - 1;
- i >= 0;
- i -= len) {
+ for (i = 0;
+ i < mh->metalen * CHAR_BIT / BITS_PER_PAGE;
+ i += len) {
switch (get_page_state(meta, i)) {
case FREE:
len = 1;
free++;
break;
- case TAKEN:
+ case BITMAP:
/* Skip over this allocated part. */
len = BITMAP_METALEN * CHAR_BIT;
subpageblocks++;
for (i = 0; i < poolsize / getpagesize(); i++) {
uint8_t *meta;
unsigned int j;
- if (get_page_state(pool, i) != SUBPAGE)
+ if (get_page_state(pool, i) != SPECIAL)
continue;
memset(count, 0, sizeof(count));