#include "build_assert/build_assert.h"
#if HAVE_ALIGNOF
#define ALIGNOF(t) __alignof__(t)
#else
/* Alignment by measuring structure padding. */
#define ALIGNOF(t) (sizeof(struct { char c; t _h; }) - sizeof(t))
#endif
/* FIXME: Doesn't handle non-page-aligned poolsize. */

#define MIN_SIZE (getpagesize() * 2)

/* What's the granularity of sub-page allocs? */
#define BITMAP_GRANULARITY 4
/* File layout:
 *
 *  file := pagestates pad metadata
 *  pagestates := pages * 2-bits-per-page
 *  pad := pad to next ALIGNOF(metadata)
 *
 *  metadata := metalen next-ptr metabits
 *  metabits := freeblock | bitblock
 *  bitblock := 2-bits-per-bit-in-page
 */
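/* Illustrative example (not from the original comment): with 4096-byte
 * pages and a 10-page pool, the page states need 10 * 2 bits = 3 bytes,
 * padded up to ALIGNOF(struct metaheader); the first metaheader starts
 * right after that padding, and its bits run up to the next page
 * boundary. */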
struct metaheader
{
	/* Length (after this header). (FIXME: Could be in pages). */
	unsigned long metalen;
	/* Next meta header, or 0 */
	unsigned long next;
	/* Bits start here. */
};
#define BITS_PER_PAGE 2
/* FIXME: Don't use page states for bitblock. It's tacky and confusing. */
enum page_state
{
	FREE,
	TAKEN,
	TAKEN_START,
	SUBPAGE,
};
/* Assumes a is a power of two. */
static unsigned long align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

static unsigned long div_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) / a;
}
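/* Illustrative sanity checks, not part of the original code: align_up()
 * rounds up to a multiple of a power of two, div_up() is ceiling
 * division.  The helper name is hypothetical; assert() is already used
 * elsewhere in this file. */
static inline void check_rounding_helpers(void)
{
	assert(align_up(13, 8) == 16);
	assert(align_up(16, 8) == 16);
	assert(div_up(13, 8) == 2);
	assert(div_up(16, 8) == 2);
}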
/* The offset of metadata for a subpage allocation is found at the end
 * of the page. */
#define SUBPAGE_METAOFF (getpagesize() - sizeof(unsigned long))
/* This is the length of metadata in bits.  It consists of a TAKEN header,
 * then two bits for every BITMAP_GRANULARITY of usable bytes in the page. */
#define BITMAP_METABITLEN \
	((1 + div_up(SUBPAGE_METAOFF, BITMAP_GRANULARITY)) * BITS_PER_PAGE)

/* This is the length in bytes. */
#define BITMAP_METALEN (div_up(BITMAP_METABITLEN, CHAR_BIT))
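/* Worked example (illustrative, assuming 4096-byte pages and an 8-byte
 * unsigned long): SUBPAGE_METAOFF is 4088, covering div_up(4088, 4) =
 * 1022 granules, so BITMAP_METABITLEN is (1 + 1022) * 2 = 2046 bits and
 * BITMAP_METALEN is div_up(2046, 8) = 256 bytes. */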
static enum page_state get_page_state(const uint8_t *bits, unsigned long page)
{
	return bits[page * 2 / CHAR_BIT] >> (page * 2 % CHAR_BIT) & 3;
}
static void set_page_state(uint8_t *bits, unsigned long page, enum page_state s)
{
	bits[page * 2 / CHAR_BIT] &= ~(3 << (page * 2 % CHAR_BIT));
	bits[page * 2 / CHAR_BIT] |= ((uint8_t)s << (page * 2 % CHAR_BIT));
}
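/* Example (illustrative): page 5's state lives in bits[1] (5 * 2 / 8 == 1)
 * at bit offset 2 (5 * 2 % 8), so it is read back as (bits[1] >> 2) & 3. */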
static struct metaheader *first_mheader(void *pool, unsigned long poolsize)
{
	unsigned int pagestatelen;

	pagestatelen = align_up(div_up(poolsize/getpagesize() * BITS_PER_PAGE,
				       CHAR_BIT),
				ALIGNOF(struct metaheader));
	return (struct metaheader *)((char *)pool + pagestatelen);
}
static struct metaheader *next_mheader(void *pool, struct metaheader *mh)
{
	if (!mh->next)
		return NULL;

	return (struct metaheader *)((char *)pool + mh->next);
}
static unsigned long pool_offset(void *pool, void *p)
{
	return (char *)p - (char *)pool;
}
void alloc_init(void *pool, unsigned long poolsize)
{
	/* FIXME: Alignment assumptions about pool. */
	unsigned long len, i;
	struct metaheader *mh;

	if (poolsize < MIN_SIZE)
		return;

	mh = first_mheader(pool, poolsize);

	/* len covers all page states, plus the metaheader. */
	len = (char *)(mh + 1) - (char *)pool;
	/* Mark all page states FREE */
	BUILD_ASSERT(FREE == 0);
	memset(pool, 0, len);

	/* metaheader len takes us up to next page boundary. */
	mh->metalen = align_up(len, getpagesize()) - len;

	/* Mark the pagestate and metadata page(s) allocated. */
	set_page_state(pool, 0, TAKEN_START);
	for (i = 1; i < div_up(len, getpagesize()); i++)
		set_page_state(pool, i, TAKEN);
}
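/* Illustrative usage sketch, not part of the original file: one way a
 * caller might obtain a page-aligned pool and initialize it.  The helper
 * name and mapping strategy are assumptions, so it is kept out of the
 * build. */
#if 0
#include <sys/mman.h>

static void *example_create_pool(unsigned long poolsize)
{
	void *pool = mmap(NULL, poolsize, PROT_READ|PROT_WRITE,
			  MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

	if (pool == MAP_FAILED)
		return NULL;
	alloc_init(pool, poolsize);
	return pool;
}
#endif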
/* Two bits per element, representing page states.  Returns 0 on fail. */
static unsigned long alloc_from_bitmap(uint8_t *bits, unsigned long elems,
				       unsigned long want, unsigned long align)
{
	long i;
	unsigned long free = 0;

	/* We allocate from far end, to increase ability to expand metadata. */
	for (i = elems - 1; i >= 0; i--) {
		switch (get_page_state(bits, i)) {
		case FREE:
			if (++free >= want) {
				unsigned long j;

				/* They might ask for large alignment. */
				if (align && i % align)
					continue;

				for (j = i+1; j < i + want; j++)
					set_page_state(bits, j, TAKEN);
				set_page_state(bits, i, TAKEN_START);
				return i;
			}
			break;
		default:
			free = 0;
		}
	}
	return 0;
}
static unsigned long alloc_get_pages(void *pool, unsigned long poolsize,
				     unsigned long pages, unsigned long align)
{
	long i;
	unsigned long free = 0;

	/* We allocate from far end, to increase ability to expand metadata. */
	for (i = poolsize / getpagesize() - 1; i >= 0; i--) {
		switch (get_page_state(pool, i)) {
		case FREE:
			if (++free >= pages) {
				unsigned long j, addr;

				addr = (unsigned long)pool + i * getpagesize();

				/* They might ask for multi-page alignment. */
				if (align && addr % align)
					continue;

				for (j = i+1; j < i + pages; j++)
					set_page_state(pool, j, TAKEN);
				set_page_state(pool, i, TAKEN_START);
				return i;
			}
			break;
		default:
			free = 0;
		}
	}
	return 0;
}
/* Offset to metadata is at end of page. */
static unsigned long *metadata_off(void *pool, unsigned long page)
{
	return (unsigned long *)
		((char *)pool + (page+1)*getpagesize() - sizeof(unsigned long));
}

static uint8_t *get_page_metadata(void *pool, unsigned long page)
{
	return (uint8_t *)pool + *metadata_off(pool, page);
}

static void set_page_metadata(void *pool, unsigned long page, uint8_t *meta)
{
	*metadata_off(pool, page) = meta - (uint8_t *)pool;
}
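/* Example (illustrative, 4096-byte pages): for page 3, the unsigned long
 * stored at pool + 4*4096 - 8 holds the byte offset, from the start of
 * the pool, of that page's bitmap metadata inside a metaheader block. */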
static void *sub_page_alloc(void *pool, unsigned long page,
			    unsigned long size, unsigned long align)
{
	uint8_t *bits = get_page_metadata(pool, page);
	unsigned long i;

	/* TAKEN at end means a bitwise alloc. */
	assert(get_page_state(bits, getpagesize()/BITMAP_GRANULARITY - 1)
	       == TAKEN);

	/* Our bits are the same as the page bits. */
	i = alloc_from_bitmap(bits, getpagesize()/BITMAP_GRANULARITY,
			      div_up(size, BITMAP_GRANULARITY),
			      align / BITMAP_GRANULARITY);

	/* Can't allocate? */
	if (i == 0)
		return NULL;

	return (char *)pool + page*getpagesize() + i*BITMAP_GRANULARITY;
}
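/* Example (illustrative): a 100-byte request with 4-byte alignment needs
 * div_up(100, 4) = 25 bitmap entries; if the bitmap search returns entry
 * i, the caller gets pool + page*getpagesize() + i*4. */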
static uint8_t *alloc_metaspace(struct metaheader *mh, unsigned long bytes)
{
	uint8_t *meta = (uint8_t *)(mh + 1);
	unsigned long free = 0, len;
	long i;

	/* TAKEN tags end a subpage alloc. */
	for (i = mh->metalen * CHAR_BIT / BITS_PER_PAGE - 1; i >= 0; i -= len) {
		switch (get_page_state(meta, i)) {
		case FREE:
			len = 1;
			free++;
			if (free == bytes * CHAR_BIT / BITS_PER_PAGE) {
				/* TAKEN marks end of metablock. */
				set_page_state(meta, i + free - 1, TAKEN);
				return meta + i / (CHAR_BIT / BITS_PER_PAGE);
			}
			break;
		default:
			/* Skip over this allocated part. */
			len = BITMAP_METALEN * CHAR_BIT;
			free = 0;
			break;
		}
	}

	return NULL;
}
/* We need this many bytes of metadata. */
static uint8_t *new_metadata(void *pool, unsigned long poolsize,
			     unsigned long bytes)
{
	struct metaheader *mh, *newmh;
	unsigned long page;

	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		uint8_t *meta = alloc_metaspace(mh, bytes);

		if (meta)
			return meta;
	}

	/* No room for metadata? Can we expand an existing one? */
	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		/* It should end on a page boundary. */
		unsigned long nextpage;

		nextpage = pool_offset(pool, (char *)(mh + 1) + mh->metalen);
		assert(nextpage % getpagesize() == 0);

		/* Now, can we grab that page? */
		if (get_page_state(pool, nextpage / getpagesize()) != FREE)
			continue;

		/* OK, expand metadata, do it again. */
		set_page_state(pool, nextpage / getpagesize(), TAKEN);
		memset((char *)pool + nextpage, 0, getpagesize());
		mh->metalen += getpagesize();
		return alloc_metaspace(mh, bytes);
	}

	/* No metadata left at all? */
	page = alloc_get_pages(pool, poolsize, div_up(bytes, getpagesize()), 1);
	if (!page)
		return NULL;

	newmh = (struct metaheader *)((char *)pool + page * getpagesize());
	newmh->metalen = getpagesize() - sizeof(*mh);
	memset(newmh + 1, 0, newmh->metalen);

	/* Sew it into linked list */
	mh = first_mheader(pool,poolsize);
	newmh->next = mh->next;
	mh->next = (char *)newmh - (char *)pool;

	return alloc_metaspace(newmh, bytes);
}
static void alloc_free_pages(void *pool, unsigned long pagenum)
{
	assert(get_page_state(pool, pagenum) == TAKEN_START);
	set_page_state(pool, pagenum, FREE);
	while (get_page_state(pool, ++pagenum) == TAKEN)
		set_page_state(pool, pagenum, FREE);
}
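/* Example (illustrative): a three-page allocation is recorded as
 * TAKEN_START, TAKEN, TAKEN, so freeing clears states from the start
 * page until the run of TAKEN pages ends. */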
static void *alloc_sub_page(void *pool, unsigned long poolsize,
			    unsigned long size, unsigned long align)
{
	unsigned long i;
	uint8_t *metadata;

	/* Look for partial page. */
	for (i = 0; i < poolsize / getpagesize(); i++) {
		void *ret;

		if (get_page_state(pool, i) != SUBPAGE)
			continue;

		ret = sub_page_alloc(pool, i, size, align);
		if (ret)
			return ret;
	}

	/* Create new SUBPAGE page. */
	i = alloc_get_pages(pool, poolsize, 1, 1);
	if (i == 0)
		return NULL;

	/* Get metadata for page. */
	metadata = new_metadata(pool, poolsize, BITMAP_METALEN);
	if (!metadata) {
		alloc_free_pages(pool, i);
		return NULL;
	}

	/* Actually, this is a SUBPAGE page now. */
	set_page_state(pool, i, SUBPAGE);

	/* Set metadata pointer for page. */
	set_page_metadata(pool, i, metadata);

	/* Do allocation like normal */
	return sub_page_alloc(pool, i, size, align);
}
void *alloc_get(void *pool, unsigned long poolsize,
		unsigned long size, unsigned long align)
{
	if (poolsize < MIN_SIZE)
		return NULL;

	/* Sub-page allocations have an overhead of 25%. */
	if (size + size/4 >= getpagesize() || align >= getpagesize()) {
		unsigned long ret, pages = div_up(size, getpagesize());

		ret = alloc_get_pages(pool, poolsize, pages, align);
		if (ret == 0)
			return NULL;
		return (char *)pool + ret * getpagesize();
	}

	return alloc_sub_page(pool, poolsize, size, align);
}
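/* Illustrative usage sketch, not part of the original file: small
 * requests come from a shared SUBPAGE page, while large or strongly
 * aligned ones get whole pages.  The function name is hypothetical. */
#if 0
static void example_use(void *pool, unsigned long poolsize)
{
	void *small = alloc_get(pool, poolsize, 100, ALIGNOF(long));
	void *big = alloc_get(pool, poolsize, 3 * getpagesize(), getpagesize());

	if (small)
		alloc_free(pool, poolsize, small);
	if (big)
		alloc_free(pool, poolsize, big);
	assert(alloc_check(pool, poolsize));
}
#endif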
static void subpage_free(void *pool, unsigned long pagenum, void *free)
{
	unsigned long off = (unsigned long)free % getpagesize();
	uint8_t *metadata;

	assert(off < SUBPAGE_METAOFF);
	assert(off % BITMAP_GRANULARITY == 0);

	metadata = get_page_metadata(pool, pagenum);

	off /= BITMAP_GRANULARITY;

	set_page_state(metadata, off++, FREE);
	while (off < SUBPAGE_METAOFF / BITMAP_GRANULARITY
	       && get_page_state(metadata, off) == TAKEN)
		set_page_state(metadata, off++, FREE);

	/* FIXME: If whole page free, free page and metadata. */
}
void alloc_free(void *pool, unsigned long poolsize, void *free)
{
	unsigned long pagenum;
	struct metaheader *mh;

	if (!free)
		return;

	assert(poolsize >= MIN_SIZE);

	mh = first_mheader(pool, poolsize);
	assert((char *)free >= (char *)(mh + 1) + mh->metalen);
	assert((char *)pool + poolsize > (char *)free);

	pagenum = pool_offset(pool, free) / getpagesize();

	if (get_page_state(pool, pagenum) == SUBPAGE)
		subpage_free(pool, pagenum, free);
	else {
		assert((unsigned long)free % getpagesize() == 0);
		alloc_free_pages(pool, pagenum);
	}
}
static bool is_metadata_page(void *pool, unsigned long poolsize,
			     unsigned long page)
{
	struct metaheader *mh;

	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		unsigned long start, end;

		start = pool_offset(pool, mh);
		end = pool_offset(pool, (char *)(mh+1) + mh->metalen);
		if (page >= start/getpagesize() && page < end/getpagesize())
			return true;
	}

	return false;
}
static bool check_subpage(void *pool, unsigned long poolsize,
			  unsigned long page)
{
	unsigned long *mhoff = metadata_off(pool, page);
	unsigned int i;
	enum page_state last_state = FREE;

	if (*mhoff + sizeof(struct metaheader) > poolsize)
		return false;

	if (*mhoff % ALIGNOF(struct metaheader) != 0)
		return false;

	/* It must point to a metadata page. */
	if (!is_metadata_page(pool, poolsize, *mhoff / getpagesize()))
		return false;

	/* Marker at end of subpage allocation is "taken" */
	if (get_page_state((uint8_t *)pool + *mhoff,
			   getpagesize()/BITMAP_GRANULARITY - 1) != TAKEN)
		return false;
	for (i = 0; i < SUBPAGE_METAOFF / BITMAP_GRANULARITY; i++) {
		enum page_state state;

		state = get_page_state((uint8_t *)pool + *mhoff, i);
		switch (state) {
		case SUBPAGE:
			return false;
		case TAKEN:
			if (last_state == FREE)
				return false;
			break;
		default:
			break;
		}

		last_state = state;
	}

	return true;
}
bool alloc_check(void *pool, unsigned long poolsize)
{
	unsigned long i;
	struct metaheader *mh;
	enum page_state last_state = FREE;
	bool was_metadata = false;

	if (poolsize < MIN_SIZE)
		return true;

	if (get_page_state(pool, 0) != TAKEN_START)
		return false;
	/* First check metadata pages. */
	/* Metadata pages will be marked TAKEN. */
	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		unsigned long start, end;

		start = pool_offset(pool, mh);
		if (start + sizeof(*mh) > poolsize)
			return false;

		end = pool_offset(pool, (char *)(mh+1) + mh->metalen);
		if (end > poolsize)
			return false;

		/* Non-first pages should start on a page boundary. */
		if (mh != first_mheader(pool, poolsize)
		    && start % getpagesize() != 0)
			return false;

		/* It should end on a page boundary. */
		if (end % getpagesize() != 0)
			return false;
	}
	for (i = 0; i < poolsize / getpagesize(); i++) {
		enum page_state state = get_page_state(pool, i);
		bool is_metadata = is_metadata_page(pool, poolsize,i);

		switch (state) {
		case FREE:
			/* metadata pages are never free. */
			if (is_metadata)
				return false;
			break;
		case TAKEN:
			/* This should continue a previous block. */
			if (last_state == FREE)
				return false;
			if (is_metadata != was_metadata)
				return false;
			break;
		case SUBPAGE:
			/* Check metadata pointer etc. */
			if (!check_subpage(pool, poolsize, i))
				return false;
			break;
		default:
			break;
		}

		last_state = state;
		was_metadata = is_metadata;