+
+ metalen = uniform_metalen(decode_usize(meta));
+
+ /* Skip the header (first two bytes of metadata). */
+ for (i = 2; i < metalen + 2; i++) {
+ BUILD_ASSERT(FREE == 0);
+ if (meta[i])
+ return false;
+ }
+ return true;
+}
+
+/* Returns true if the sub-page ("special") @page has no live
+ * allocations left in it.
+ *
+ * The first bit pair of the page's metadata encodes how the rest of
+ * the metadata is laid out (UNIFORM vs BITMAP); dispatch accordingly.
+ */
+static bool special_page_is_empty(void *pool, unsigned long page)
+{
+	uint8_t *meta;
+	enum sub_metadata_type type;
+
+	meta = get_page_metadata(pool, page);
+	type = get_bit_pair(meta, 0);
+
+	switch (type) {
+	case UNIFORM:
+		return uniform_page_is_empty(meta);
+	case BITMAP:
+		return bitmap_page_is_empty(meta);
+	default:
+		/* Corrupt metadata.  assert(0) compiles out under NDEBUG,
+		 * so don't fall off the end of a bool function (undefined
+		 * behavior): report "not empty", the non-destructive
+		 * answer, so the caller won't reclaim the page. */
+		assert(0);
+		return false;
+	}
+}
+
+/* Reset the metadata of sub-page @page so the whole page reads as FREE.
+ * Only called once special_page_is_empty() has confirmed there are no
+ * live allocations left in the page. */
+static void clear_special_metadata(void *pool, unsigned long page)
+{
+	uint8_t *meta;
+	enum sub_metadata_type type;
+
+	meta = get_page_metadata(pool, page);
+	/* First bit pair of the metadata encodes the layout type. */
+	type = get_bit_pair(meta, 0);
+
+	switch (type) {
+	case UNIFORM:
+		/* First two bytes are the header, rest is already FREE */
+		BUILD_ASSERT(FREE == 0);
+		memset(meta, 0, 2);
+		break;
+	case BITMAP:
+		/* First two bits are the header.  Zeroing the whole first
+		 * byte also clears the next three bit pairs — presumably
+		 * already FREE since the page is empty; confirm. */
+		BUILD_ASSERT(BITMAP_METALEN > 1);
+		meta[0] = 0;
+		break;
+	default:
+		/* Unknown layout type: metadata corruption. */
+		assert(0);
+	}
+}
+
+/* Returns true if we cleaned any pages. */
+static bool clean_empty_subpages(void *pool, unsigned long poolsize)
+{
+	unsigned long page, num_pages = poolsize / getpagesize();
+	bool cleaned = false;
+
+	/* Walk every page in the pool, reclaiming SPECIAL (sub-page)
+	 * pages that no longer contain any live allocations. */
+	for (page = 0; page < num_pages; page++) {
+		if (get_page_state(pool, page) != SPECIAL)
+			continue;
+		if (!special_page_is_empty(pool, page))
+			continue;
+
+		clear_special_metadata(pool, page);
+		set_page_state(pool, page, FREE);
+		cleaned = true;
+	}
+	return cleaned;
+}
+
+/* Returns true if we cleaned any pages. */
+static bool clean_metadata(void *pool, unsigned long poolsize)
+{
+	struct metaheader *mh, *prev_mh = NULL;
+	bool progress = false;
+
+	/* NOTE(review): prev_mh is never advanced inside this loop, so it
+	 * remains NULL and the "completely empty" unlink branch below can
+	 * never execute — was "prev_mh = mh;" intended at the bottom of
+	 * the loop?  Confirm against upstream. */
+	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
+		uint8_t *meta;
+		long i;
+		unsigned long metalen = get_metalen(pool, poolsize, mh);
+
+		/* Metadata bytes live immediately after the header. */
+		meta = (uint8_t *)(mh + 1);
+		BUILD_ASSERT(FREE == 0);
+		/* Scan back to the last non-zero (in-use) metadata byte;
+		 * leaves i in [0, metalen-1] (meta[0] is never examined). */
+		for (i = metalen - 1; i > 0; i--)
+			if (meta[i] != 0)
+				break;
+
+		/* Completely empty? */
+		/* NOTE(review): i == metalen is unreachable given the loop
+		 * above; the emptiness test presumably wanted i == 0 (plus
+		 * a check of meta[0]) — confirm intent. */
+		if (prev_mh && i == metalen) {
+			/* Unlink this metaheader and free its page(s). */
+			alloc_free_pages(pool,
+					 pool_offset(pool, mh)/getpagesize());
+			prev_mh->next = mh->next;
+			mh = prev_mh;
+			progress = true;
+		} else {
+			uint8_t *p;
+
+			/* Some pages at end are free? */
+			for (p = (uint8_t *)(mh+1) + metalen - getpagesize();
+			     p > meta + i;
+			     p -= getpagesize()) {
+				set_page_state(pool,
+					       pool_offset(pool, p)
+					       / getpagesize(),
+					       FREE);
+				progress = true;
+			}
+		}
+	}
+
+	return progress;
+}
+
+/* Allocate @size bytes (aligned to @align) out of the pool.  When the
+ * first attempt fails, run up to two garbage-collection passes (empty
+ * sub-pages, then metadata) and retry after each one that makes
+ * progress.  Returns NULL if the pool is too small or exhausted. */
+void *alloc_get(void *pool, unsigned long poolsize,
+		unsigned long size, unsigned long align)
+{
+	bool subpage_clean = false, metadata_clean = false;
+
+	if (poolsize < MIN_SIZE)
+		return NULL;
+
+	for (;;) {
+		unsigned long off;
+
+		/* Sub-page allocations have an overhead of ~12%. */
+		if (size + size/8 >= getpagesize() || align >= getpagesize()) {
+			unsigned long pages = div_up(size, getpagesize());
+
+			off = alloc_get_pages(pool, poolsize, pages, align)
+				* getpagesize();
+		} else
+			off = alloc_sub_page(pool, poolsize, size, align);
+
+		/* Offset 0 is the pool header, so it doubles as failure. */
+		if (off != 0)
+			return (char *)pool + off;
+
+		/* Allocation failed: garbage collection, one pass each. */
+		if (!subpage_clean) {
+			subpage_clean = true;
+			if (clean_empty_subpages(pool, poolsize))
+				continue;
+		}
+		if (!metadata_clean) {
+			metadata_clean = true;
+			if (clean_metadata(pool, poolsize))
+				continue;
+		}
+
+		/* FIXME: Compact metadata? */
+		return NULL;
+	}
+}
+
+/* Free a bitmap-style sub-page allocation at byte offset @off within
+ * its page.  @pool and @pagenum are unused here; kept for signature
+ * symmetry with uniform_free(). */
+static void bitmap_free(void *pool, unsigned long pagenum, unsigned long off,
+			uint8_t *metadata)
+{
+	assert(off % BITMAP_GRANULARITY == 0);
+
+	/* Convert byte offset into a bitmap-unit index. */
+	off /= BITMAP_GRANULARITY;
+
+	/* Offset by one because first bit is used for header. */
+	off++;
+
+	/* Free the first unit, then every TAKEN continuation unit after
+	 * it.  NOTE(review): this presumes the first unit of an
+	 * allocation carries a state other than plain TAKEN, otherwise
+	 * this loop would run on into the next allocation — confirm. */
+	set_bit_pair(metadata, off++, FREE);
+	while (off < SUBPAGE_METAOFF / BITMAP_GRANULARITY
+	       && get_bit_pair(metadata, off) == TAKEN)
+		set_bit_pair(metadata, off++, FREE);
+}
+
+/* Free one element of a uniform (fixed element size) sub-page. */
+static void uniform_free(void *pool, unsigned long pagenum, unsigned long off,
+			 uint8_t *metadata)
+{
+	unsigned int elem_size, idx;
+	uint8_t mask;
+
+	/* Every allocation in a uniform page has the same size, encoded
+	 * in the two header bytes. */
+	elem_size = decode_usize(metadata);
+
+	/* Must have been this size. */
+	assert(off % elem_size == 0);
+	idx = off / elem_size;
+
+	/* Skip header. */
+	metadata += 2;
+
+	/* Clear the element's bit in the allocation bitmap. */
+	mask = 1 << (idx % CHAR_BIT);
+	/* Must have been allocated. */
+	assert(metadata[idx / CHAR_BIT] & mask);
+	metadata[idx / CHAR_BIT] &= ~mask;
+}
+
+/* Free a sub-page allocation, dispatching on the page's metadata
+ * layout (bitmap or uniform). */
+static void subpage_free(void *pool, unsigned long pagenum, void *free)
+{
+	uint8_t *metadata = get_page_metadata(pool, pagenum);
+	unsigned long off = (unsigned long)free % getpagesize();
+	enum sub_metadata_type type = get_bit_pair(metadata, 0);
+
+	/* The tail of the page holds metadata, never allocations. */
+	assert(off < SUBPAGE_METAOFF);
+
+	if (type == BITMAP)
+		bitmap_free(pool, pagenum, off, metadata);
+	else if (type == UNIFORM)
+		uniform_free(pool, pagenum, off, metadata);
+	else
+		assert(0);
+}
+
+/* Return an allocation obtained from alloc_get() to the pool.
+ * Freeing NULL is a no-op. */
+void alloc_free(void *pool, unsigned long poolsize, void *free)
+{
+	struct metaheader *first;
+	unsigned long pagenum;
+
+	if (!free)
+		return;
+
+	assert(poolsize >= MIN_SIZE);
+
+	/* Sanity: the pointer must lie inside the pool, past the first
+	 * metadata header. */
+	first = first_mheader(pool, poolsize);
+	assert((char *)free >= (char *)(first + 1));
+	assert((char *)pool + poolsize > (char *)free);
+
+	pagenum = pool_offset(pool, free) / getpagesize();
+
+	if (get_page_state(pool, pagenum) != SPECIAL) {
+		/* Whole-page allocations are always page-aligned. */
+		assert((unsigned long)free % getpagesize() == 0);
+		alloc_free_pages(pool, pagenum);
+	} else
+		subpage_free(pool, pagenum, free);
+}
+
+/* Does @page fall inside any metaheader's area (header + metadata)? */
+static bool is_metadata_page(void *pool, unsigned long poolsize,
+			     unsigned long page)
+{
+	struct metaheader *mh;
+
+	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
+		unsigned long first_page, limit_page;
+
+		/* Pages spanned by this header and its metadata bytes. */
+		first_page = pool_offset(pool, mh) / getpagesize();
+		limit_page = pool_offset(pool, (char *)(mh+1)
+					 + get_metalen(pool, poolsize, mh))
+			/ getpagesize();
+		if (page >= first_page && page < limit_page)
+			return true;
+	}
+	return false;
+}
+
+/* Consistency-check a BITMAP metadata area: walk every bit pair and
+ * reject state sequences that a well-formed bitmap cannot produce. */
+static bool check_bitmap_metadata(void *pool, unsigned long *mhoff)
+{
+	enum alloc_state last_state = FREE;
+	unsigned int i;
+
+	for (i = 0; i < SUBPAGE_METAOFF / BITMAP_GRANULARITY; i++) {
+		enum alloc_state state;
+
+		/* +1 because header is the first byte. */
+		state = get_bit_pair((uint8_t *)pool + *mhoff, i+1);
+		switch (state) {
+		case SPECIAL:
+			/* SPECIAL never appears inside a bitmap area. */
+			return false;
+		case TAKEN:
+			/* TAKEN continues a prior allocation, so it cannot
+			 * directly follow a FREE unit. */
+			if (last_state == FREE)
+				return false;
+			break;
+		default:
+			break;
+		}
+		last_state = state;
+	}
+	return true;
+}
+
+/* Consistency-check a UNIFORM metadata area: the encoded element size
+ * must be valid, and any uniform-cache entry whose page points at this
+ * same metadata must agree on that size. */
+static bool check_uniform_metadata(void *pool, unsigned long *mhoff)
+{
+	uint8_t *meta = (uint8_t *)pool + *mhoff;
+	unsigned int i, usize;
+	struct uniform_cache *uc = pool;
+
+	/* Size must round-trip through the cache sizing helper. */
+	usize = decode_usize(meta);
+	if (usize == 0 || suitable_for_uc(usize, 1) != usize)
+		return false;
+
+	/* If it's in uniform cache, make sure that agrees on size. */
+	for (i = 0; i < UNIFORM_CACHE_NUM; i++) {
+		uint8_t *ucm;
+
+		/* Empty cache slot. */
+		if (!uc->size[i])
+			continue;
+
+		/* Cache entry for some other page's metadata. */
+		ucm = get_page_metadata(pool, uc->page[i]);
+		if (ucm != meta)
+			continue;
+
+		if (usize != uc->size[i])
+			return false;
+	}
+	return true;
+}
+
+/* Validate one SPECIAL page: its stored metadata offset must reference
+ * a sane, in-bounds metaheader area, and the metadata itself must pass
+ * the layout-specific consistency check. */
+static bool check_subpage(void *pool, unsigned long poolsize,
+			  unsigned long page)
+{
+	unsigned long *mhoff = metadata_off(pool, page);
+
+	/* Offset must fit a metaheader inside the pool and be aligned. */
+	if (*mhoff + sizeof(struct metaheader) > poolsize
+	    || *mhoff % ALIGNOF(struct metaheader) != 0)
+		return false;
+
+	/* It must point to a metadata page. */
+	if (!is_metadata_page(pool, poolsize, *mhoff / getpagesize()))
+		return false;
+
+	/* Header at start of subpage allocation */
+	switch (get_bit_pair((uint8_t *)pool + *mhoff, 0)) {
+	case BITMAP:
+		return check_bitmap_metadata(pool, mhoff);
+	case UNIFORM:
+		return check_uniform_metadata(pool, mhoff);
+	default:
+		return false;
+	}
+}
+
+bool alloc_check(void *pool, unsigned long poolsize)
+{
+ unsigned long i;
+ struct metaheader *mh;
+ enum alloc_state last_state = FREE;
+ bool was_metadata = false;