#include <ccan/build_assert/build_assert.h>
#include <ccan/alignof/alignof.h>
/* FIXME: We assume getpagesize() doesn't change.  Remapping the file with
 * a different pagesize should still work. */
/* FIXME: Doesn't handle non-page-aligned poolsize. */
#define MIN_SIZE (getpagesize() * 2)
/* What's the granularity of sub-page allocs? */
#define BITMAP_GRANULARITY 4
 * file := uniform-cache pagestates pad metadata
 * pagestates := pages * 2-bits-per-page
 * pad := pad to next ALIGNOF(metaheader)
 * metadata := metalen next-ptr metabits
 * metabits := freeblock | bitblock | uniformblock
 * bitblock := BITMAP + 2-bits-per-bit-in-page + pad-to-byte
 * uniformblock := UNIFORM + 14-bit-byte-len + bits + pad-to-byte
#define UNIFORM_CACHE_NUM 16
	uint16_t size[UNIFORM_CACHE_NUM];
	/* These could be u32 if we're prepared to limit the pool size. */
	unsigned long page[UNIFORM_CACHE_NUM];
	/* Next meta header, or 0. */
	/* Bits start here. */
/* Assumes a is a power of two. */
static unsigned long align_up(unsigned long x, unsigned long a)
	return (x + a - 1) & ~(a - 1);
static unsigned long align_down(unsigned long x, unsigned long a)
static unsigned long div_up(unsigned long x, unsigned long a)
	return (x + a - 1) / a;
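/* For example (illustrative, using the helpers above): with a == 8,
 * align_up(13, 8) == 16, align_down(13, 8) == 8 and div_up(13, 8) == 2;
 * values already on a boundary are unchanged, e.g. align_up(16, 8) == 16. */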
/* It turns out that we spend a lot of time dealing with bit pairs.
 * These routines manipulate them.
static uint8_t get_bit_pair(const uint8_t *bits, unsigned long index)
	return (bits[index * 2 / CHAR_BIT] >> (index * 2 % CHAR_BIT)) & 3;
static void set_bit_pair(uint8_t *bits, unsigned long index, uint8_t val)
	bits[index * 2 / CHAR_BIT] &= ~(3 << (index * 2 % CHAR_BIT));
	bits[index * 2 / CHAR_BIT] |= (val << (index * 2 % CHAR_BIT));
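/* Illustrative sketch (not part of the allocator): packing 2-bit values
 * into a byte array and reading them back with the helpers above.
 *
 *	uint8_t bits[1] = { 0 };
 *	set_bit_pair(bits, 0, 3);
 *	set_bit_pair(bits, 2, 1);
 *	assert(get_bit_pair(bits, 0) == 3);
 *	assert(get_bit_pair(bits, 1) == 0);
 *	assert(get_bit_pair(bits, 2) == 1);
 */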
/* This is used for page states and subpage allocations. */
	SPECIAL, /* Sub-page allocation (only used as a page state). */
/* The types for subpage metadata. */
enum sub_metadata_type
	/* FREE is the same as the alloc state. */
	BITMAP = 1, /* Bitmap-allocated page. */
	UNIFORM, /* Uniform-size allocated page. */
/* Page states are represented by bit pairs, after the uniform cache at the
 * start of the pool. */
#define BITS_PER_PAGE 2
/* How many metadata bit pairs per byte? */
#define METADATA_PER_BYTE (CHAR_BIT / 2)
static uint8_t *get_page_statebits(const void *pool)
	return (uint8_t *)pool + sizeof(struct uniform_cache);
static enum alloc_state get_page_state(const void *pool, unsigned long page)
	return get_bit_pair(get_page_statebits(pool), page);
static void set_page_state(void *pool, unsigned long page, enum alloc_state s)
	set_bit_pair(get_page_statebits(pool), page, s);
/* The offset of metadata for a subpage allocation is found at the end
#define SUBPAGE_METAOFF (getpagesize() - sizeof(unsigned long))
/* This is the length of metadata in bits.  It consists of two bits
 * for every BITMAP_GRANULARITY of usable bytes in the page, then two
 * bits for the trailer. */
#define BITMAP_METABITLEN \
	((div_up(SUBPAGE_METAOFF, BITMAP_GRANULARITY) + 1) * BITS_PER_PAGE)
/* This is the length in bytes. */
#define BITMAP_METALEN (div_up(BITMAP_METABITLEN, CHAR_BIT))
static struct metaheader *first_mheader(void *pool, unsigned long poolsize)
	unsigned int pagestatelen;
	pagestatelen = align_up(div_up(poolsize/getpagesize() * BITS_PER_PAGE,
				ALIGNOF(struct metaheader));
	return (struct metaheader *)(get_page_statebits(pool) + pagestatelen);
static struct metaheader *next_mheader(void *pool, struct metaheader *mh)
	return (struct metaheader *)((char *)pool + mh->next);
static unsigned long pool_offset(void *pool, void *p)
	return (char *)p - (char *)pool;
void alloc_init(void *pool, unsigned long poolsize)
	/* FIXME: Alignment assumptions about pool. */
	unsigned long len, i;
	struct metaheader *mh;
	if (poolsize < MIN_SIZE)
	mh = first_mheader(pool, poolsize);
	/* Mark all page states FREE, zero the uniform cache, and zero the
	 * metaheader bitmap, which takes up the rest of the first page. */
	len = align_up(pool_offset(pool, mh + 1), getpagesize());
	BUILD_ASSERT(FREE == 0);
	memset(pool, 0, len);
	/* Mark the pagestate and metadata page(s) allocated. */
	set_page_state(pool, 0, TAKEN_START);
	for (i = 1; i < div_up(len, getpagesize()); i++)
		set_page_state(pool, i, TAKEN);
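/* Illustrative usage sketch (not part of this file): set up a pool in an
 * anonymous buffer and make a couple of allocations.  Real users would
 * typically mmap() a file instead; error handling is omitted.
 *
 *	unsigned long poolsize = getpagesize() * 32;
 *	void *pool = calloc(1, poolsize);
 *
 *	alloc_init(pool, poolsize);
 *	void *a = alloc_get(pool, poolsize, 100, ALIGNOF(long));
 *	void *b = alloc_get(pool, poolsize, 2 * getpagesize(), getpagesize());
 *	alloc_free(pool, poolsize, a);
 *	alloc_free(pool, poolsize, b);
 */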
/* Two bits per element, representing page states.  Returns 0 on fail.
 * off is used to allocate from subpage bitmaps, which use the first 2
 * bits as the type, so the real bitmap is offset by 1. */
static unsigned long alloc_from_bitmap(uint8_t *bits, unsigned long off,
				unsigned long want, unsigned long align)
	/* We allocate from the far end, to make it easier to expand metadata. */
	for (i = elems - 1; i >= 0; i--) {
		switch (get_bit_pair(bits, off+i)) {
			if (++free >= want) {
				/* They might ask for large alignment. */
				if (align && i % align)
				set_bit_pair(bits, off+i, TAKEN_START);
				for (j = i+1; j < i + want; j++)
					set_bit_pair(bits, off+j, TAKEN);
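/* Example: when want == 3 elements are found, ending at index i, the pairs
 * are marked (from low index to high):
 *
 *	bits[off+i]   == TAKEN_START
 *	bits[off+i+1] == TAKEN
 *	bits[off+i+2] == TAKEN
 *
 * Freeing later clears the TAKEN_START pair and every following TAKEN pair
 * (see alloc_free_pages() and bitmap_free() below). */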
static unsigned long alloc_get_pages(void *pool, unsigned long poolsize,
				unsigned long pages, unsigned long align)
	return alloc_from_bitmap(get_page_statebits(pool),
				0, poolsize / getpagesize(), pages,
				align / getpagesize());
/* The offset to a page's metadata is stored in the last word of the page. */
static unsigned long *metadata_off(void *pool, unsigned long page)
	return (unsigned long *)
		((char *)pool + (page+1)*getpagesize() - sizeof(unsigned long));
static uint8_t *get_page_metadata(void *pool, unsigned long page)
	return (uint8_t *)pool + *metadata_off(pool, page);
static void set_page_metadata(void *pool, unsigned long page, uint8_t *meta)
	*metadata_off(pool, page) = meta - (uint8_t *)pool;
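/* For example, with 4096-byte pages and 8-byte longs (assumptions for
 * illustration), the metadata offset for page 3 lives in the last 8 bytes
 * of that page, at pool + 4*4096 - 8, and get_page_metadata(pool, 3) simply
 * adds the stored offset back to pool. */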
static unsigned long sub_page_alloc(void *pool, unsigned long page,
				unsigned long size, unsigned long align)
	uint8_t *bits = get_page_metadata(pool, page);
	enum sub_metadata_type type;
	type = get_bit_pair(bits, 0);
	/* If this is a uniform page, we can't allocate from it. */
	assert(type == BITMAP);
	/* We use a standard bitmap, but offset because of that BITMAP
	i = alloc_from_bitmap(bits, 1, SUBPAGE_METAOFF/BITMAP_GRANULARITY,
			div_up(size, BITMAP_GRANULARITY),
			align / BITMAP_GRANULARITY);
	/* Can't allocate? */
	/* i-1 because of the header. */
	return page*getpagesize() + (i-1)*BITMAP_GRANULARITY;
/* We look at the page states to figure out where the allocation for this
static unsigned long get_metalen(void *pool, unsigned long poolsize,
				struct metaheader *mh)
	unsigned long i, first, pages = poolsize / getpagesize();
	first = pool_offset(pool, mh + 1)/getpagesize();
	for (i = first + 1; i < pages && get_page_state(pool,i) == TAKEN; i++);
	return i * getpagesize() - pool_offset(pool, mh + 1);
static unsigned int uniform_metalen(unsigned int usize)
	unsigned int metalen;
	assert(usize < (1 << 14));
	/* Two bits for the header, 14 bits for the size, then one bit for each
	 * element the page can hold.  Round up to a whole number of bytes. */
	metalen = div_up(2 + 14 + SUBPAGE_METAOFF / usize, CHAR_BIT);
	/* To ensure the metaheader is always aligned, round the byte count up. */
	metalen = align_up(metalen, ALIGNOF(struct metaheader));
static unsigned int decode_usize(uint8_t *meta)
	return ((unsigned)meta[1] << (CHAR_BIT-2)) | (meta[0] >> 2);
static void encode_usize(uint8_t *meta, unsigned int usize)
	meta[0] = (UNIFORM | (usize << 2));
	meta[1] = (usize >> (CHAR_BIT - 2));
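/* Illustrative sketch (not part of the allocator): the two header bytes
 * round-trip any usize below 1 << 14, e.g.
 *
 *	uint8_t hdr[2];
 *	encode_usize(hdr, 1000);
 *	assert(decode_usize(hdr) == 1000);
 *	assert((hdr[0] & 3) == UNIFORM);
 */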
static uint8_t *alloc_metaspace(void *pool, unsigned long poolsize,
				struct metaheader *mh, unsigned long bytes,
				enum sub_metadata_type type)
	uint8_t *meta = (uint8_t *)(mh + 1);
	unsigned long free = 0, len, i, metalen;
	metalen = get_metalen(pool, poolsize, mh);
	/* Walk through the metadata looking for a free run. */
	for (i = 0; i < metalen * METADATA_PER_BYTE; i += len) {
		switch (get_bit_pair(meta, i)) {
			if (free == bytes * METADATA_PER_BYTE) {
				/* Mark the start of this block with its type. */
				set_bit_pair(meta, i - free + 1, type);
				return meta + (i - free + 1)/METADATA_PER_BYTE;
			/* Skip over this allocated part. */
			len = BITMAP_METALEN * METADATA_PER_BYTE;
			/* Figure out the metalen given this block's usize. */
			len = decode_usize(meta + i / METADATA_PER_BYTE);
			len = uniform_metalen(len) * METADATA_PER_BYTE;
/* We need this many bytes of metadata. */
static uint8_t *new_metadata(void *pool, unsigned long poolsize,
				unsigned long bytes, enum sub_metadata_type type)
	struct metaheader *mh, *newmh;
	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh))
		if ((meta = alloc_metaspace(pool, poolsize, mh, bytes, type)))
	/* No room for metadata?  Can we expand an existing area? */
	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		unsigned long nextpage;
		/* We start on this page. */
		nextpage = pool_offset(pool, (char *)(mh+1))/getpagesize();
		/* Iterate through any other pages we own. */
		while (get_page_state(pool, ++nextpage) == TAKEN);
		/* Now, can we grab that page? */
		if (get_page_state(pool, nextpage) != FREE)
		/* OK, expand the metadata and try again. */
		set_page_state(pool, nextpage, TAKEN);
		BUILD_ASSERT(FREE == 0);
		memset((char *)pool + nextpage*getpagesize(), 0, getpagesize());
		return alloc_metaspace(pool, poolsize, mh, bytes, type);
	/* No metadata left at all? */
	page = alloc_get_pages(pool, poolsize, div_up(bytes, getpagesize()), 1);
	newmh = (struct metaheader *)((char *)pool + page * getpagesize());
	BUILD_ASSERT(FREE == 0);
	memset(newmh + 1, 0, getpagesize() - sizeof(*mh));
	/* Sew it into the linked list. */
	mh = first_mheader(pool,poolsize);
	newmh->next = mh->next;
	mh->next = pool_offset(pool, newmh);
	return alloc_metaspace(pool, poolsize, newmh, bytes, type);
static void alloc_free_pages(void *pool, unsigned long pagenum)
	assert(get_page_state(pool, pagenum) == TAKEN_START);
	set_page_state(pool, pagenum, FREE);
	while (get_page_state(pool, ++pagenum) == TAKEN)
		set_page_state(pool, pagenum, FREE);
static void maybe_transform_uniform_page(void *pool, unsigned long offset)
	/* FIXME: If possible and page isn't full, change to a bitmap. */
/* Returns 0, or the size of the uniform alloc to use. */
static unsigned long suitable_for_uc(unsigned long size, unsigned long align)
	unsigned long num_elems, wastage, usize;
	unsigned long bitmap_cost;
	/* Fix up silly alignments. */
	usize = align_up(size, align);
	/* How many can fit in this page? */
	num_elems = SUBPAGE_METAOFF / usize;
	/* Can happen with bigger alignments. */
	/* Usize maxes out at 14 bits. */
	if (usize >= (1 << 14))
	/* How many bytes would be left at the end? */
	wastage = SUBPAGE_METAOFF % usize;
	/* If we can get a larger allocation within alignment constraints, we
	 * should do it; otherwise we might as well leave the wastage at the end. */
	usize += align_down(wastage / num_elems, align);
	/* Bitmap allocation costs 2 bits per BITMAP_GRANULARITY bytes, plus
	 * however much we waste in rounding up to BITMAP_GRANULARITY. */
	bitmap_cost = 2 * div_up(size, BITMAP_GRANULARITY)
		+ CHAR_BIT * (align_up(size, BITMAP_GRANULARITY) - size);
	/* Our cost is 1 bit, plus the usize overhead. */
	if (bitmap_cost < 1 + (usize - size) * CHAR_BIT)
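/* Worked example (assuming 4096-byte pages and 8-byte longs, so
 * SUBPAGE_METAOFF == 4088): for size == 48, align == 8 we get usize == 48,
 * num_elems == 85 and wastage == 8, so usize is unchanged.  The bitmap cost
 * is 2 * 12 + 8 * 0 == 24 bits against a uniform cost of 1 + 0 == 1 bit,
 * so the uniform encoding is chosen and its size (48) is reported. */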
static unsigned long uniform_alloc(void *pool, unsigned long poolsize,
				struct uniform_cache *uc,
	uint8_t *metadata = get_page_metadata(pool, uc->page[ucnum]) + 2;
	unsigned long i, max;
	/* Simple one-bit-per-object bitmap. */
	max = SUBPAGE_METAOFF / uc->size[ucnum];
	for (i = 0; i < max; i++) {
		if (!(metadata[i / CHAR_BIT] & (1 << (i % CHAR_BIT)))) {
			metadata[i / CHAR_BIT] |= (1 << (i % CHAR_BIT));
			return uc->page[ucnum] * getpagesize()
				+ i * uc->size[ucnum];
static unsigned long new_uniform_page(void *pool, unsigned long poolsize,
	unsigned long page, metalen;
	page = alloc_get_pages(pool, poolsize, 1, 1);
	metalen = uniform_metalen(usize);
	/* Get metadata for page. */
	metadata = new_metadata(pool, poolsize, metalen, UNIFORM);
		alloc_free_pages(pool, page);
	encode_usize(metadata, usize);
	BUILD_ASSERT(FREE == 0);
	memset(metadata + 2, 0, metalen - 2);
	/* Actually, this is a subpage page now. */
	set_page_state(pool, page, SPECIAL);
	/* Set metadata pointer for page. */
	set_page_metadata(pool, page, metadata);
static unsigned long alloc_sub_page(void *pool, unsigned long poolsize,
				unsigned long size, unsigned long align)
	unsigned long i, usize;
	struct uniform_cache *uc = pool;
	usize = suitable_for_uc(size, align);
		/* Look for a uniform page. */
		for (i = 0; i < UNIFORM_CACHE_NUM; i++) {
			if (uc->size[i] == usize) {
				ret = uniform_alloc(pool, poolsize, uc, i);
				/* OK, that one is full, remove from cache. */
		/* OK, try a new uniform page.  Use random discard for now. */
		i = random() % UNIFORM_CACHE_NUM;
		maybe_transform_uniform_page(pool, uc->page[i]);
		uc->page[i] = new_uniform_page(pool, poolsize, usize);
			return uniform_alloc(pool, poolsize, uc, i);
	/* Look for a partial page. */
	for (i = 0; i < poolsize / getpagesize(); i++) {
		if (get_page_state(pool, i) != SPECIAL)
		ret = sub_page_alloc(pool, i, size, align);
	/* Create a new SUBPAGE page. */
	i = alloc_get_pages(pool, poolsize, 1, 1);
	/* Get metadata for page. */
	metadata = new_metadata(pool, poolsize, BITMAP_METALEN, BITMAP);
		alloc_free_pages(pool, i);
	/* Actually, this is a subpage page now. */
	set_page_state(pool, i, SPECIAL);
	/* Set metadata pointer for page. */
	set_page_metadata(pool, i, metadata);
	/* Do the allocation as normal. */
	return sub_page_alloc(pool, i, size, align);
static bool bitmap_page_is_empty(uint8_t *meta)
	/* Skip the header (the first bit pair of metadata). */
	for (i = 1; i < SUBPAGE_METAOFF/BITMAP_GRANULARITY+1; i++)
		if (get_bit_pair(meta, i) != FREE)
static bool uniform_page_is_empty(uint8_t *meta)
	unsigned int i, metalen;
	metalen = uniform_metalen(decode_usize(meta));
	/* Skip the header (the first two bytes of metadata). */
	for (i = 2; i < metalen + 2; i++) {
		BUILD_ASSERT(FREE == 0);
static bool special_page_is_empty(void *pool, unsigned long page)
	enum sub_metadata_type type;
	meta = get_page_metadata(pool, page);
	type = get_bit_pair(meta, 0);
		return uniform_page_is_empty(meta);
		return bitmap_page_is_empty(meta);
static void clear_special_metadata(void *pool, unsigned long page)
	enum sub_metadata_type type;
	meta = get_page_metadata(pool, page);
	type = get_bit_pair(meta, 0);
		/* The first two bytes are the header; the rest is already FREE. */
		BUILD_ASSERT(FREE == 0);
		/* The first two bits are the header. */
		BUILD_ASSERT(BITMAP_METALEN > 1);
/* Returns true if we cleaned any pages. */
static bool clean_empty_subpages(void *pool, unsigned long poolsize)
	bool progress = false;
	for (i = 0; i < poolsize/getpagesize(); i++) {
		if (get_page_state(pool, i) != SPECIAL)
		if (special_page_is_empty(pool, i)) {
			clear_special_metadata(pool, i);
			set_page_state(pool, i, FREE);
/* Returns true if we cleaned any pages. */
static bool clean_metadata(void *pool, unsigned long poolsize)
	struct metaheader *mh, *prev_mh = NULL;
	bool progress = false;
	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		unsigned long metalen = get_metalen(pool, poolsize, mh);
		meta = (uint8_t *)(mh + 1);
		BUILD_ASSERT(FREE == 0);
		for (i = metalen - 1; i > 0; i--)
		/* Completely empty? */
		if (prev_mh && i == metalen) {
			alloc_free_pages(pool,
				pool_offset(pool, mh)/getpagesize());
			prev_mh->next = mh->next;
			/* Some pages at the end are free? */
			for (p = (uint8_t *)(mh+1) + metalen - getpagesize();
			     p -= getpagesize()) {
void *alloc_get(void *pool, unsigned long poolsize,
		unsigned long size, unsigned long align)
	bool subpage_clean = false, metadata_clean = false;
	if (poolsize < MIN_SIZE)
	/* Sub-page allocations have an overhead of ~12%. */
	if (size + size/8 >= getpagesize() || align >= getpagesize()) {
		unsigned long pages = div_up(size, getpagesize());
		ret = alloc_get_pages(pool, poolsize, pages, align)
		ret = alloc_sub_page(pool, poolsize, size, align);
	return (char *)pool + ret;
	/* Allocation failed: garbage collection. */
	if (!subpage_clean) {
		subpage_clean = true;
		if (clean_empty_subpages(pool, poolsize))
	if (!metadata_clean) {
		metadata_clean = true;
		if (clean_metadata(pool, poolsize))
	/* FIXME: Compact metadata? */
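/* For example, with 4096-byte pages (an assumption for illustration), a
 * 3700-byte request takes the whole-page path (3700 + 462 >= 4096), while a
 * 3600-byte request is first attempted as a sub-page allocation; requests
 * with an alignment of a page or more always use whole pages. */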
static void bitmap_free(void *pool, unsigned long pagenum, unsigned long off,
	assert(off % BITMAP_GRANULARITY == 0);
	off /= BITMAP_GRANULARITY;
	/* Offset by one because the first bit pair is used for the header. */
	set_bit_pair(metadata, off++, FREE);
	while (off < SUBPAGE_METAOFF / BITMAP_GRANULARITY
	       && get_bit_pair(metadata, off) == TAKEN)
		set_bit_pair(metadata, off++, FREE);
static void uniform_free(void *pool, unsigned long pagenum, unsigned long off,
	unsigned int usize, bit;
	usize = decode_usize(metadata);
	/* Must have been this size. */
	assert(off % usize == 0);
	/* Must have been allocated. */
	assert(metadata[bit / CHAR_BIT] & (1 << (bit % CHAR_BIT)));
	metadata[bit / CHAR_BIT] &= ~(1 << (bit % CHAR_BIT));
static void subpage_free(void *pool, unsigned long pagenum, void *free)
	unsigned long off = (unsigned long)free % getpagesize();
	uint8_t *metadata = get_page_metadata(pool, pagenum);
	enum sub_metadata_type type;
	type = get_bit_pair(metadata, 0);
	assert(off < SUBPAGE_METAOFF);
		bitmap_free(pool, pagenum, off, metadata);
		uniform_free(pool, pagenum, off, metadata);
void alloc_free(void *pool, unsigned long poolsize, void *free)
	unsigned long pagenum;
	struct metaheader *mh;
	assert(poolsize >= MIN_SIZE);
	mh = first_mheader(pool, poolsize);
	assert((char *)free >= (char *)(mh + 1));
	assert((char *)pool + poolsize > (char *)free);
	pagenum = pool_offset(pool, free) / getpagesize();
	if (get_page_state(pool, pagenum) == SPECIAL)
		subpage_free(pool, pagenum, free);
		assert((unsigned long)free % getpagesize() == 0);
		alloc_free_pages(pool, pagenum);
unsigned long alloc_size(void *pool, unsigned long poolsize, void *p)
	unsigned long len, pagenum;
	struct metaheader *mh;
	assert(poolsize >= MIN_SIZE);
	mh = first_mheader(pool, poolsize);
	assert((char *)p >= (char *)(mh + 1));
	assert((char *)pool + poolsize > (char *)p);
	pagenum = pool_offset(pool, p) / getpagesize();
	if (get_page_state(pool, pagenum) == SPECIAL) {
		unsigned long off = (unsigned long)p % getpagesize();
		uint8_t *metadata = get_page_metadata(pool, pagenum);
		enum sub_metadata_type type = get_bit_pair(metadata, 0);
		assert(off < SUBPAGE_METAOFF);
			assert(off % BITMAP_GRANULARITY == 0);
			off /= BITMAP_GRANULARITY;
			/* Offset by one because the first bit pair is the header. */
			len = BITMAP_GRANULARITY;
			while (++off < SUBPAGE_METAOFF / BITMAP_GRANULARITY
			       && get_bit_pair(metadata, off) == TAKEN)
				len += BITMAP_GRANULARITY;
			len = decode_usize(metadata);
		while (get_page_state(pool, ++pagenum) == TAKEN)
			len += getpagesize();
static bool is_metadata_page(void *pool, unsigned long poolsize,
	struct metaheader *mh;
	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		unsigned long start, end;
		start = pool_offset(pool, mh);
		end = pool_offset(pool, (char *)(mh+1)
				  + get_metalen(pool, poolsize, mh));
		if (page >= start/getpagesize() && page < end/getpagesize())
static bool check_bitmap_metadata(void *pool, unsigned long *mhoff)
	enum alloc_state last_state = FREE;
	for (i = 0; i < SUBPAGE_METAOFF / BITMAP_GRANULARITY; i++) {
		enum alloc_state state;
		/* +1 because the header is the first bit pair. */
		state = get_bit_pair((uint8_t *)pool + *mhoff, i+1);
			if (last_state == FREE)
static bool check_uniform_metadata(void *pool, unsigned long *mhoff)
	uint8_t *meta = (uint8_t *)pool + *mhoff;
	unsigned int i, usize;
	struct uniform_cache *uc = pool;
	usize = decode_usize(meta);
	if (usize == 0 || suitable_for_uc(usize, 1) != usize)
	/* If it's in the uniform cache, make sure the cache agrees on the size. */
	for (i = 0; i < UNIFORM_CACHE_NUM; i++) {
		ucm = get_page_metadata(pool, uc->page[i]);
		if (usize != uc->size[i])
static bool check_subpage(void *pool, unsigned long poolsize,
	unsigned long *mhoff = metadata_off(pool, page);
	if (*mhoff + sizeof(struct metaheader) > poolsize)
	if (*mhoff % ALIGNOF(struct metaheader) != 0)
	/* It must point to a metadata page. */
	if (!is_metadata_page(pool, poolsize, *mhoff / getpagesize()))
	/* Header at the start of the subpage allocation. */
	switch (get_bit_pair((uint8_t *)pool + *mhoff, 0)) {
		return check_bitmap_metadata(pool, mhoff);
		return check_uniform_metadata(pool, mhoff);
bool alloc_check(void *pool, unsigned long poolsize)
	struct metaheader *mh;
	enum alloc_state last_state = FREE;
	bool was_metadata = false;
	if (poolsize < MIN_SIZE)
	if (get_page_state(pool, 0) != TAKEN_START)
	/* First check the metadata pages. */
	/* Metadata pages will be marked TAKEN. */
	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		unsigned long start, end;
		start = pool_offset(pool, mh);
		if (start + sizeof(*mh) > poolsize)
		end = pool_offset(pool, (char *)(mh+1)
				  + get_metalen(pool, poolsize, mh));
		/* Non-first metaheaders should start on a page boundary. */
		if (mh != first_mheader(pool, poolsize)
		    && start % getpagesize() != 0)
		/* It should end on a page boundary. */
		if (end % getpagesize() != 0)
	for (i = 0; i < poolsize / getpagesize(); i++) {
		enum alloc_state state = get_page_state(pool, i);
		bool is_metadata = is_metadata_page(pool, poolsize, i);
			/* Metadata pages are never free. */
			/* This should continue a previous block. */
			if (last_state == FREE)
			if (is_metadata != was_metadata)
			/* Check the metadata pointer etc. */
			if (!check_subpage(pool, poolsize, i))
		was_metadata = is_metadata;
void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
	struct metaheader *mh;
	struct uniform_cache *uc = pool;
	unsigned long pagebitlen, metadata_pages, count[1<<BITS_PER_PAGE], tot;
	if (poolsize < MIN_SIZE) {
		fprintf(out, "Pool smaller than %d: no content\n", MIN_SIZE);
	for (i = 0; i < UNIFORM_CACHE_NUM; i++)
		tot += (uc->size[i] != 0);
	fprintf(out, "Uniform cache (%lu entries):\n", tot);
	for (i = 0; i < UNIFORM_CACHE_NUM; i++) {
		unsigned int j, total = 0;
		/* The first two bytes are the header. */
		meta = get_page_metadata(pool, uc->page[i]) + 2;
		for (j = 0; j < SUBPAGE_METAOFF / uc->size[i]; j++)
			if (meta[j / 8] & (1 << (j % 8)))
		fprintf(out, " %u: %u/%zu (%zu%% density)\n",
			uc->size[i], total, SUBPAGE_METAOFF / uc->size[i],
			(total * 100) / (SUBPAGE_METAOFF / uc->size[i]));
	memset(count, 0, sizeof(count));
	for (i = 0; i < poolsize / getpagesize(); i++)
		count[get_page_state(pool, i)]++;
	mh = first_mheader(pool, poolsize);
	pagebitlen = (uint8_t *)mh - get_page_statebits(pool);
	fprintf(out, "%lu bytes of page bits: FREE/TAKEN/TAKEN_START/SUBPAGE = %lu/%lu/%lu/%lu\n",
		pagebitlen, count[0], count[1], count[2], count[3]);
	/* One metadata page for every page of page bits. */
	metadata_pages = div_up(pagebitlen, getpagesize());
	/* Now do each metadata page. */
	for (; mh; mh = next_mheader(pool,mh)) {
		unsigned long free = 0, bitmapblocks = 0, uniformblocks = 0,
			len = 0, uniformlen = 0, bitmaplen = 0, metalen;
		uint8_t *meta = (uint8_t *)(mh + 1);
		metalen = get_metalen(pool, poolsize, mh);
		metadata_pages += (sizeof(*mh) + metalen) / getpagesize();
		for (i = 0; i < metalen * METADATA_PER_BYTE; i += len) {
			switch (get_bit_pair(meta, i)) {
				/* Skip over this allocated part. */
				len = BITMAP_METALEN * METADATA_PER_BYTE;
				/* Skip over this part. */
				len = decode_usize(meta + i/METADATA_PER_BYTE);
				len = uniform_metalen(len) * METADATA_PER_BYTE;
		fprintf(out, "Metadata %lu-%lu: %lu free, %lu bitmapblocks, %lu uniformblocks, %lu%% density\n",
			pool_offset(pool, mh),
			pool_offset(pool, (char *)(mh+1) + metalen),
			free, bitmapblocks, uniformblocks,
			(bitmaplen + uniformlen) * 100
			/ (free + bitmaplen + uniformlen));
	/* Account for the total pages allocated. */
	tot = (count[1] + count[2] - metadata_pages) * getpagesize();
	fprintf(out, "Total metadata bytes = %lu\n",
		metadata_pages * getpagesize());
	/* Now do every subpage. */
	for (i = 0; i < poolsize / getpagesize(); i++) {
		unsigned int j, allocated;
		enum sub_metadata_type type;
		if (get_page_state(pool, i) != SPECIAL)
		memset(count, 0, sizeof(count));
		meta = get_page_metadata(pool, i);
		type = get_bit_pair(meta, 0);
		if (type == BITMAP) {
			for (j = 0; j < SUBPAGE_METAOFF/BITMAP_GRANULARITY; j++)
				count[get_bit_pair(meta, j + 1)]++; /* +1 skips the type header. */
			allocated = (count[1] + count[2]) * BITMAP_GRANULARITY;
			fprintf(out, "Subpage bitmap ");
			unsigned int usize = decode_usize(meta);
			assert(type == UNIFORM);
			fprintf(out, "Subpage uniform (%u) ", usize);
			for (j = 0; j < SUBPAGE_METAOFF / usize; j++)
				count[!!(meta[j / 8] & (1 << (j % 8)))]++;
			allocated = count[1] * usize;
		fprintf(out, "%lu: FREE/TAKEN/TAKEN_START = %lu/%lu/%lu %u%% density\n",
			i, count[0], count[1], count[2],
			allocated * 100 / getpagesize());
	/* This is optimistic, since we overalloc in several cases. */
	fprintf(out, "Best possible allocation density = %lu%%\n",
		tot * 100 / poolsize);
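/* Illustrative debug-harness sketch (not part of this file): after any
 * sequence of operations the pool can be verified and dumped, e.g.
 *
 *	if (!alloc_check(pool, poolsize))
 *		abort();
 *	alloc_visualize(stderr, pool, poolsize);
 */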