+/* We need this many bytes of metadata.
+ * Returns a pointer to "bytes" bytes of metadata space, recorded with
+ * the given sub-metadata "type" by alloc_metaspace(), or NULL if no
+ * pages are left in the pool. Tries, in order: existing metadata
+ * areas, growing an existing area by one page, then allocating a
+ * brand-new metaheader. */
+static uint8_t *new_metadata(void *pool, unsigned long poolsize,
+ unsigned long bytes, enum sub_metadata_type type)
+{
+ struct metaheader *mh, *newmh;
+ unsigned long page;
+ uint8_t *meta;
+
+ /* First try: carve the space out of an existing metadata area. */
+ for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh))
+ if ((meta = alloc_metaspace(pool, poolsize, mh, bytes, type)))
+ return meta;
+
+ /* No room for metadata? Can we expand an existing one? */
+ for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
+ unsigned long nextpage;
+
+ /* We start on this page. */
+ nextpage = pool_offset(pool, (char *)(mh+1))/getpagesize();
+ /* Iterate through any other pages we own. */
+ /* NOTE(review): assumes a non-TAKEN page-state entry exists
+ * before the end of the pool -- confirm this cannot walk
+ * past poolsize. */
+ while (get_page_state(pool, ++nextpage) == TAKEN);
+
+ /* Now, can we grab that page? */
+ if (get_page_state(pool, nextpage) != FREE)
+ continue;
+
+ /* OK, expand metadata, do it again. */
+ set_page_state(pool, nextpage, TAKEN);
+ BUILD_ASSERT(FREE == 0);
+ memset((char *)pool + nextpage*getpagesize(), 0, getpagesize());
+ /* NOTE(review): returns even if this alloc_metaspace() fails,
+ * without trying to expand the remaining headers. */
+ return alloc_metaspace(pool, poolsize, mh, bytes, type);
+ }
+
+ /* No metadata left at all? Grab fresh pages for a new header. */
+ page = alloc_get_pages(pool, poolsize, div_up(bytes, getpagesize()), 1);
+ if (!page)
+ return NULL;
+
+ newmh = (struct metaheader *)((char *)pool + page * getpagesize());
+ BUILD_ASSERT(FREE == 0);
+ /* NOTE(review): only the first page (minus the header) is zeroed;
+ * if bytes spans several pages the rest stay uninitialized --
+ * confirm alloc_metaspace() tolerates that. */
+ memset(newmh + 1, 0, getpagesize() - sizeof(*mh));
+
+ /* Sew it into linked list, just after the first header. */
+ mh = first_mheader(pool,poolsize);
+ newmh->next = mh->next;
+ mh->next = pool_offset(pool, newmh);
+
+ return alloc_metaspace(pool, poolsize, newmh, bytes, type);
+}
+
+/* Free a run of pages starting at pagenum: the TAKEN_START page
+ * itself plus every immediately-following TAKEN continuation page. */
+static void alloc_free_pages(void *pool, unsigned long pagenum)
+{
+ assert(get_page_state(pool, pagenum) == TAKEN_START);
+ do {
+ set_page_state(pool, pagenum, FREE);
+ } while (get_page_state(pool, ++pagenum) == TAKEN);
+}
+
+/* Placeholder for converting a partially-used uniform page into
+ * bitmap form; currently a no-op, so both parameters are unused.
+ * Called before a uniform-cache slot is recycled. */
+static void maybe_transform_uniform_page(void *pool, unsigned long offset)
+{
+ /* FIXME: If possible and page isn't full, change to a bitmap */
+}
+
+/* Returns 0 or the size of the uniform alloc to use.
+ * Decides whether a size/align request is better served by a
+ * "uniform" page (fixed-size slots, one metadata bit per slot) than
+ * by the generic bitmap allocator, by comparing their per-object
+ * overheads in bits. */
+static unsigned long suitable_for_uc(unsigned long size, unsigned long align)
+{
+ unsigned long num_elems, wastage, usize;
+ unsigned long bitmap_cost;
+
+ /* Zero-byte allocations still need a slot. */
+ if (size == 0)
+ size = 1;
+
+ /* Fix up silly alignments. */
+ usize = align_up(size, align);
+
+ /* How many can fit in this page? */
+ num_elems = SUBPAGE_METAOFF / usize;
+
+ /* Can happen with bigger alignments. */
+ if (!num_elems)
+ return 0;
+
+ /* Usize maxes out at 14 bits (see encode_usize/decode_usize). */
+ if (usize >= (1 << 14))
+ return 0;
+
+ /* How many bytes would be left at the end? */
+ wastage = SUBPAGE_METAOFF % usize;
+
+ /* If we can get a larger allocation within alignment constraints, we
+ * should do it, otherwise might as well leave wastage at the end. */
+ usize += align_down(wastage / num_elems, align);
+
+ /* Bitmap allocation costs 2 bits per BITMAP_GRANULARITY bytes, plus
+ * however much we waste in rounding up to BITMAP_GRANULARITY. */
+ bitmap_cost = 2 * div_up(size, BITMAP_GRANULARITY)
+ + CHAR_BIT * (align_up(size, BITMAP_GRANULARITY) - size);
+
+ /* Our cost is 1 bit, plus usize overhead.  If the bitmap scheme
+ * is strictly cheaper, decline the uniform page. */
+ if (bitmap_cost < 1 + (usize - size) * CHAR_BIT)
+ return 0;
+
+ return usize;
+}
+
+/* Grab a free slot from a cached uniform page.
+ * Returns the pool offset of the slot, or 0 if the page is full.
+ * (poolsize is unused; kept for signature symmetry with callers.) */
+static unsigned long uniform_alloc(void *pool, unsigned long poolsize,
+ struct uniform_cache *uc,
+ unsigned long ucnum)
+{
+ uint8_t *bits = get_page_metadata(pool, uc->page[ucnum]) + 2;
+ unsigned long n, total;
+
+ /* Simple one-bit-per-object bitmap: find the first clear bit. */
+ total = SUBPAGE_METAOFF / uc->size[ucnum];
+ for (n = 0; n < total; n++) {
+ uint8_t mask = 1 << (n % CHAR_BIT);
+ if (bits[n / CHAR_BIT] & mask)
+ continue;
+ bits[n / CHAR_BIT] |= mask;
+ return uc->page[ucnum] * getpagesize()
+ + n * uc->size[ucnum];
+ }
+
+ /* Every slot taken. */
+ return 0;
+}
+
+/* Allocate and initialize a fresh uniform page of usize-byte slots.
+ * Returns its page number, or 0 on failure (the page and any
+ * partially-acquired resources are released). */
+static unsigned long new_uniform_page(void *pool, unsigned long poolsize,
+ unsigned long usize)
+{
+ unsigned long page, metalen;
+ uint8_t *metadata;
+
+ page = alloc_get_pages(pool, poolsize, 1, 1);
+ if (page == 0)
+ return 0;
+
+ metalen = uniform_metalen(usize);
+
+ /* Get metadata for page. */
+ metadata = new_metadata(pool, poolsize, metalen, UNIFORM);
+ if (!metadata) {
+ alloc_free_pages(pool, page);
+ return 0;
+ }
+
+ /* Two-byte header records the slot size. */
+ encode_usize(metadata, usize);
+
+ /* The rest of the metadata is the slot bitmap: all free. */
+ BUILD_ASSERT(FREE == 0);
+ memset(metadata + 2, 0, metalen - 2);
+
+ /* Actually, this is a subpage page now. */
+ set_page_state(pool, page, SPECIAL);
+
+ /* Set metadata pointer for page. */
+ set_page_metadata(pool, page, metadata);
+
+ return page;
+}
+
+/* Allocate "size" bytes with "align" alignment from a subpage
+ * (SPECIAL) page, creating one if needed.  Returns the pool offset
+ * of the allocation, or 0 on failure. */
+static unsigned long alloc_sub_page(void *pool, unsigned long poolsize,
+ unsigned long size, unsigned long align)
+{
+ unsigned long i, usize;
+ uint8_t *metadata;
+ struct uniform_cache *uc = pool;
+
+ /* Prefer a uniform page when the overhead comparison favors it. */
+ usize = suitable_for_uc(size, align);
+ if (usize) {
+ /* Look for a uniform page. */
+ for (i = 0; i < UNIFORM_CACHE_NUM; i++) {
+ if (uc->size[i] == usize) {
+ unsigned long ret;
+ ret = uniform_alloc(pool, poolsize, uc, i);
+ if (ret != 0)
+ return ret;
+ /* OK, that one is full, remove from cache. */
+ uc->size[i] = 0;
+ break;
+ }
+ }
+
+ /* OK, try a new uniform page. Use random discard for now. */
+ i = random() % UNIFORM_CACHE_NUM;
+ maybe_transform_uniform_page(pool, uc->page[i]);
+
+ uc->page[i] = new_uniform_page(pool, poolsize, usize);
+ if (uc->page[i]) {
+ uc->size[i] = usize;
+ /* Fresh page, bitmap all clear: cannot fail. */
+ return uniform_alloc(pool, poolsize, uc, i);
+ }
+ /* Couldn't make one; invalidate the discarded slot. */
+ uc->size[i] = 0;
+ }
+
+ /* Look for partial page. */
+ for (i = 0; i < poolsize / getpagesize(); i++) {
+ unsigned long ret;
+ if (get_page_state(pool, i) != SPECIAL)
+ continue;
+
+ ret = sub_page_alloc(pool, i, size, align);
+ if (ret)
+ return ret;
+ }
+
+ /* Create new SUBPAGE page. */
+ i = alloc_get_pages(pool, poolsize, 1, 1);
+ if (i == 0)
+ return 0;
+
+ /* Get metadata for page. */
+ metadata = new_metadata(pool, poolsize, BITMAP_METALEN, BITMAP);
+ if (!metadata) {
+ alloc_free_pages(pool, i);
+ return 0;
+ }
+
+ /* Actually, this is a subpage page now. */
+ set_page_state(pool, i, SPECIAL);
+
+ /* Set metadata pointer for page. */
+ set_page_metadata(pool, i, metadata);
+
+ /* Do allocation like normal */
+ return sub_page_alloc(pool, i, size, align);
+}
+
+/* True if every bitmap entry in this subpage page reads FREE. */
+static bool bitmap_page_is_empty(uint8_t *meta)
+{
+ unsigned int n = 1; /* bit pair 0 is the metadata-type header */
+
+ while (n < SUBPAGE_METAOFF/BITMAP_GRANULARITY+1) {
+ if (get_bit_pair(meta, n) != FREE)
+ return false;
+ n++;
+ }
+
+ return true;
+}
+
+/* True if every slot in this uniform page is free.
+ * The metadata area is metalen bytes total: a two-byte header (type
+ * bits + encoded slot size) followed by the slot bitmap, so the
+ * bitmap occupies meta[2] .. meta[metalen - 1] (matching the
+ * memset(metadata + 2, 0, metalen - 2) done at page creation).
+ * The old bound of metalen + 2 read two bytes past the end. */
+static bool uniform_page_is_empty(uint8_t *meta)
+{
+ unsigned int i, metalen;
+
+ metalen = uniform_metalen(decode_usize(meta));
+
+ /* Skip the header (first two bytes of metadata). */
+ for (i = 2; i < metalen; i++) {
+ BUILD_ASSERT(FREE == 0);
+ if (meta[i])
+ return false;
+ }
+ return true;
+}
+
+/* Is this SPECIAL (subpage) page completely free?
+ * Dispatches on the metadata-type bit pair stored at the front of
+ * the page's metadata. */
+static bool special_page_is_empty(void *pool, unsigned long page)
+{
+ uint8_t *meta;
+ enum sub_metadata_type type;
+
+ meta = get_page_metadata(pool, page);
+ type = get_bit_pair(meta, 0);
+
+ switch (type) {
+ case UNIFORM:
+ return uniform_page_is_empty(meta);
+ case BITMAP:
+ return bitmap_page_is_empty(meta);
+ default:
+ /* Corrupt type: trap in debug builds.  The return keeps
+ * NDEBUG builds from falling off the end of a bool
+ * function (undefined behavior). */
+ assert(0);
+ return false;
+ }
+}
+
+/* Reset a SPECIAL page's metadata header so the record reads as FREE.
+ * Callers in this file only invoke it after special_page_is_empty(),
+ * so all non-header metadata bytes are already zero. */
+static void clear_special_metadata(void *pool, unsigned long page)
+{
+ uint8_t *meta;
+ enum sub_metadata_type type;
+
+ meta = get_page_metadata(pool, page);
+ type = get_bit_pair(meta, 0);
+
+ switch (type) {
+ case UNIFORM:
+ /* First two bytes are the header, rest is already FREE */
+ BUILD_ASSERT(FREE == 0);
+ memset(meta, 0, 2);
+ break;
+ case BITMAP:
+ /* First two bits is the header.  Zeroing the whole first
+ * byte is safe: the other bit pairs in it are FREE already. */
+ BUILD_ASSERT(BITMAP_METALEN > 1);
+ meta[0] = 0;
+ break;
+ default:
+ assert(0);
+ }
+}
+
+/* Returns true if we cleaned any pages.
+ * Sweeps the pool, returning every fully-empty SPECIAL page to the
+ * FREE state after wiping its metadata header. */
+static bool clean_empty_subpages(void *pool, unsigned long poolsize)
+{
+ unsigned long page, npages = poolsize / getpagesize();
+ bool freed_any = false;
+
+ for (page = 0; page < npages; page++) {
+ if (get_page_state(pool, page) != SPECIAL)
+ continue;
+ if (!special_page_is_empty(pool, page))
+ continue;
+
+ clear_special_metadata(pool, page);
+ set_page_state(pool, page, FREE);
+ freed_any = true;
+ }
+ return freed_any;
+}
+
+/* Returns true if we cleaned any pages.
+ * Walks every metaheader: frees wholly-unused trailing pages of each
+ * metadata area, and unlinks + frees a header whose metadata is
+ * completely unused.  The first header is never removed, since the
+ * list head refers to it. */
+static bool clean_metadata(void *pool, unsigned long poolsize)
+{
+ struct metaheader *mh, *prev_mh = NULL;
+ bool progress = false;
+
+ for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
+ uint8_t *meta;
+ long i;
+ unsigned long metalen = get_metalen(pool, poolsize, mh);
+
+ meta = (uint8_t *)(mh + 1);
+ BUILD_ASSERT(FREE == 0);
+ /* Find the last in-use metadata byte; i stays >= 0 so the
+ * tail loop below never frees the page holding mh itself. */
+ for (i = metalen - 1; i > 0; i--)
+ if (meta[i] != 0)
+ break;
+
+ /* Completely empty?  (i can never reach metalen, so test
+ * i == 0 and the remaining byte explicitly.) */
+ if (prev_mh && i == 0 && meta[0] == 0) {
+ /* Unlink before freeing so we never read mh->next
+ * out of already-released pages. */
+ prev_mh->next = mh->next;
+ alloc_free_pages(pool,
+ pool_offset(pool, mh)/getpagesize());
+ mh = prev_mh;
+ progress = true;
+ } else {
+ uint8_t *p;
+
+ /* Some pages at end are free? */
+ for (p = (uint8_t *)(mh+1) + metalen - getpagesize();
+ p > meta + i;
+ p -= getpagesize()) {
+ set_page_state(pool,
+ pool_offset(pool, p)
+ / getpagesize(),
+ FREE);
+ progress = true;
+ }
+ }
+ /* Track the predecessor for the unlink case above (it was
+ * never advanced before, leaving that branch dead and the
+ * empty headers leaked). */
+ prev_mh = mh;
+ }
+
+ return progress;
+}
+