#include "build_assert/build_assert.h"

/* FIXME: We assume getpagesize() doesn't change.  Remapping the file with
 * a different pagesize should still work. */

#if HAVE_ALIGNOF
#define ALIGNOF(t) __alignof__(t)
#else
/* Alignment by measuring structure padding. */
#define ALIGNOF(t) (sizeof(struct { char c; t _h; }) - 1 - sizeof(t))
#endif

/* FIXME: Doesn't handle non-page-aligned poolsize. */

#define MIN_SIZE (getpagesize() * 2)

/* What's the granularity of sub-page allocs? */
#define BITMAP_GRANULARITY 4
/*
 * file := uniform-cache pagestates pad metadata
 * pagestates := pages * 2-bits-per-page
 * pad := pad to next ALIGNOF(metaheader)
 *
 * metadata := metalen next-ptr metabits
 * metabits := freeblock | bitblock | uniformblock
 *
 * bitblock := BITMAP + 2-bits-per-bit-in-page + pad-to-byte
 * uniformblock := UNIFORM + 14-bit-byte-len + bits + pad-to-byte
 */
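/* As an illustrative sketch (assuming a 4096-byte page, an 8-byte unsigned
 * long and a 1MB pool, none of which this code requires): the uniform cache
 * takes the first 160 bytes (16 u16 sizes plus 16 unsigned long page
 * numbers), the pool's 256 pages then need 256 * 2 bits = 64 bytes of page
 * states, and the first metaheader starts at offset 224, which is already
 * aligned for ALIGNOF(struct metaheader). */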
#define UNIFORM_CACHE_NUM 16

struct uniform_cache
{
        uint16_t size[UNIFORM_CACHE_NUM];
        /* These could be u32 if we're prepared to limit size. */
        unsigned long page[UNIFORM_CACHE_NUM];
};

struct metaheader
{
        /* Next meta header, or 0 */
        unsigned long next;
        /* Bits start here. */
};
/* Assumes a is a power of two. */
static unsigned long align_up(unsigned long x, unsigned long a)
        return (x + a - 1) & ~(a - 1);

static unsigned long align_down(unsigned long x, unsigned long a)

static unsigned long div_up(unsigned long x, unsigned long a)
        return (x + a - 1) / a;
/* It turns out that we spend a lot of time dealing with bit pairs.
 * These routines manipulate them.
 */
static uint8_t get_bit_pair(const uint8_t *bits, unsigned long index)
        return bits[index * 2 / CHAR_BIT] >> (index * 2 % CHAR_BIT) & 3;

static void set_bit_pair(uint8_t *bits, unsigned long index, uint8_t val)
        bits[index * 2 / CHAR_BIT] &= ~(3 << (index * 2 % CHAR_BIT));
        bits[index * 2 / CHAR_BIT] |= (val << (index * 2 % CHAR_BIT));
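/* Purely as an illustration (assuming CHAR_BIT == 8): each byte packs four
 * bit pairs, so index 5 lives in bits 2-3 of bits[1], and setting that pair
 * to TAKEN_START leaves the other three pairs in the byte untouched. */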
/* This is used for page states and subpage allocations */
enum alloc_state
{
        FREE,
        TAKEN,
        TAKEN_START,
        SPECIAL,        /* Sub-page allocation for page states. */
};

/* The types for subpage metadata. */
enum sub_metadata_type
{
        /* FREE is same as alloc state */
        BITMAP = 1, /* bitmap allocated page */
        UNIFORM, /* uniform size allocated page */
};
/* Page states are represented by bit pairs, just after the uniform cache
 * at the start of the pool. */
#define BITS_PER_PAGE 2

static uint8_t *get_page_statebits(const void *pool)
        return (uint8_t *)pool + sizeof(struct uniform_cache);

static enum alloc_state get_page_state(const void *pool, unsigned long page)
        return get_bit_pair(get_page_statebits(pool), page);

static void set_page_state(void *pool, unsigned long page, enum alloc_state s)
        set_bit_pair(get_page_statebits(pool), page, s);
/* The offset of metadata for a subpage allocation is found at the end
 * of the page. */
#define SUBPAGE_METAOFF (getpagesize() - sizeof(unsigned long))

/* This is the length of metadata in bits.  It consists of two bits
 * for every BITMAP_GRANULARITY of usable bytes in the page, then two
 * bits for the trailer. */
#define BITMAP_METABITLEN                                               \
        ((div_up(SUBPAGE_METAOFF, BITMAP_GRANULARITY) + 1) * BITS_PER_PAGE)

/* This is the length in bytes. */
#define BITMAP_METALEN (div_up(BITMAP_METABITLEN, CHAR_BIT))
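/* Illustrative numbers (assuming a 4096-byte page and an 8-byte unsigned
 * long): SUBPAGE_METAOFF is 4088, so BITMAP_METABITLEN is
 * (div_up(4088, 4) + 1) * 2 = 2046 bits and BITMAP_METALEN is 256 bytes of
 * bitmap metadata for each sub-page bitmap page. */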
static struct metaheader *first_mheader(void *pool, unsigned long poolsize)
        unsigned int pagestatelen;

        pagestatelen = align_up(div_up(poolsize/getpagesize() * BITS_PER_PAGE,
                                ALIGNOF(struct metaheader));
        return (struct metaheader *)(get_page_statebits(pool) + pagestatelen);

static struct metaheader *next_mheader(void *pool, struct metaheader *mh)
        return (struct metaheader *)((char *)pool + mh->next);

static unsigned long pool_offset(void *pool, void *p)
        return (char *)p - (char *)pool;
void alloc_init(void *pool, unsigned long poolsize)
        /* FIXME: Alignment assumptions about pool. */
        unsigned long len, i;
        struct metaheader *mh;

        if (poolsize < MIN_SIZE)

        mh = first_mheader(pool, poolsize);

        /* Mark all page states FREE, all uniform caches zero, and all of
         * the metaheader bitmap, which takes the rest of the first page. */
        len = align_up(pool_offset(pool, mh + 1), getpagesize());
        BUILD_ASSERT(FREE == 0);
        memset(pool, 0, len);

        /* Mark the pagestate and metadata page(s) allocated. */
        set_page_state(pool, 0, TAKEN_START);
        for (i = 1; i < div_up(len, getpagesize()); i++)
                set_page_state(pool, i, TAKEN);
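/* A minimal usage sketch (illustrative only; the static buffer and the
 * sizes below are hypothetical, not part of this allocator):
 *
 *      static char pool[16 * 4096];
 *
 *      alloc_init(pool, sizeof(pool));
 *      void *p = alloc_get(pool, sizeof(pool), 100, 16);
 *      if (p)
 *              alloc_free(pool, sizeof(pool), p);
 *
 * Every call takes the same pool/poolsize pair: the allocator keeps no
 * state outside the pool itself. */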
/* Two bits per element, representing page states.  Returns 0 on fail.
 * off is used to allocate from subpage bitmaps, which use the first 2
 * bits as the type, so the real bitmap is offset by 1. */
static unsigned long alloc_from_bitmap(uint8_t *bits, unsigned long off,
                                        unsigned long elems,
                                        unsigned long want, unsigned long align)

        /* We allocate from the far end, to increase our ability to expand
         * the metadata. */
        for (i = elems - 1; i >= 0; i--) {
                switch (get_bit_pair(bits, off+i)) {
                        if (++free >= want) {
                                /* They might ask for large alignment. */
                                if (align && i % align)

                                set_bit_pair(bits, off+i, TAKEN_START);
                                for (j = i+1; j < i + want; j++)
                                        set_bit_pair(bits, off+j, TAKEN);
static unsigned long alloc_get_pages(void *pool, unsigned long poolsize,
                                     unsigned long pages, unsigned long align)
        return alloc_from_bitmap(get_page_statebits(pool),
                                 0, poolsize / getpagesize(), pages,
                                 align / getpagesize());

/* Offset to metadata is at end of page. */
static unsigned long *metadata_off(void *pool, unsigned long page)
        return (unsigned long *)
                ((char *)pool + (page+1)*getpagesize() - sizeof(unsigned long));

static uint8_t *get_page_metadata(void *pool, unsigned long page)
        return (uint8_t *)pool + *metadata_off(pool, page);

static void set_page_metadata(void *pool, unsigned long page, uint8_t *meta)
        *metadata_off(pool, page) = meta - (uint8_t *)pool;
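/* For instance (illustrative, assuming a 4096-byte page and an 8-byte
 * unsigned long): the metadata offset for page 3 sits in the last 8 bytes
 * of that page, at pool offset 4*4096 - 8 = 16376, and holds the pool
 * offset of wherever page 3's metadata block lives. */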
static unsigned long sub_page_alloc(void *pool, unsigned long page,
                                    unsigned long size, unsigned long align)
        uint8_t *bits = get_page_metadata(pool, page);

        enum sub_metadata_type type;

        type = get_bit_pair(bits, 0);

        /* If this is a uniform page, we can't allocate from it. */

        assert(type == BITMAP);

        /* We use a standard bitmap, but offset because of that BITMAP
         * header. */
        i = alloc_from_bitmap(bits, 1, SUBPAGE_METAOFF/BITMAP_GRANULARITY,
                              div_up(size, BITMAP_GRANULARITY),
                              align / BITMAP_GRANULARITY);

        /* Can't allocate? */

        /* i-1 because of the header. */
        return page*getpagesize() + (i-1)*BITMAP_GRANULARITY;
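/* An illustrative case: with BITMAP_GRANULARITY of 4, a 10-byte request
 * claims div_up(10, 4) = 3 granules (12 bytes); if alloc_from_bitmap hands
 * back slot i, the data starts (i-1)*4 bytes into the page, since slot 0
 * of the bitmap is occupied by the BITMAP type header. */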
/* We look at the page states to figure out where the allocation for this
 * metadata ends. */
static unsigned long get_metalen(void *pool, unsigned long poolsize,
                                 struct metaheader *mh)
        unsigned long i, first, pages = poolsize / getpagesize();

        first = pool_offset(pool, mh + 1)/getpagesize();

        for (i = first + 1; i < pages && get_page_state(pool,i) == TAKEN; i++);

        return i * getpagesize() - pool_offset(pool, mh + 1);

static unsigned int uniform_metalen(unsigned int usize)
        unsigned int metalen;

        assert(usize < (1 << 14));

        /* Two bits for the header, 14 bits for size, then one bit for each
         * element the page can hold.  Round up to number of bytes. */
        metalen = div_up(2*CHAR_BIT + SUBPAGE_METAOFF / usize, CHAR_BIT);

        /* To ensure metaheader is always aligned, round bytes up. */
        metalen = align_up(metalen, ALIGNOF(struct metaheader));
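/* Worked example (illustrative, assuming a 4096-byte page, an 8-byte
 * unsigned long and 8-byte metaheader alignment): usize 12 gives
 * 4088/12 = 340 elements, so metalen is div_up(16 + 340, 8) = 45 bytes,
 * rounded up to 48. */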
static unsigned int decode_usize(uint8_t *meta)
        return ((unsigned)meta[1] << (CHAR_BIT-2)) | (meta[0] >> 2);

static void encode_usize(uint8_t *meta, unsigned int usize)
        meta[0] = (UNIFORM | (usize << 2));
        meta[1] = (usize >> (CHAR_BIT - 2));
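/* For example (illustrative, assuming CHAR_BIT == 8, with UNIFORM == 2):
 * encode_usize(meta, 300) stores meta[0] = 0xb2 (UNIFORM in the low two
 * bits, the low six bits of 300 above them) and meta[1] = 300 >> 6 = 4;
 * decode_usize() then reassembles (4 << 6) | (0xb2 >> 2) = 300. */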
static uint8_t *alloc_metaspace(void *pool, unsigned long poolsize,
                                struct metaheader *mh, unsigned long bytes,
                                enum sub_metadata_type type)
        uint8_t *meta = (uint8_t *)(mh + 1);
        unsigned long free = 0, len, i, metalen;

        metalen = get_metalen(pool, poolsize, mh);

        /* Walk through metadata looking for free. */
        for (i = 0; i < metalen * CHAR_BIT / BITS_PER_PAGE; i += len) {
                switch (get_bit_pair(meta, i)) {
                        if (free == bytes * CHAR_BIT / BITS_PER_PAGE) {
                                /* Mark this metadata block with its type. */
                                set_bit_pair(meta, i - free + 1, type);
                                return meta + (i - free + 1)
                                        / (CHAR_BIT / BITS_PER_PAGE);

                        /* Skip over this allocated part. */
                        len = BITMAP_METALEN * CHAR_BIT / BITS_PER_PAGE;

                        /* Figure metalen given usize. */
                        len = decode_usize(meta + i * BITS_PER_PAGE / CHAR_BIT);
                        len = uniform_metalen(len) * CHAR_BIT / BITS_PER_PAGE;
/* We need this many bytes of metadata. */
static uint8_t *new_metadata(void *pool, unsigned long poolsize,
                             unsigned long bytes, enum sub_metadata_type type)
        struct metaheader *mh, *newmh;

        for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh))
                if ((meta = alloc_metaspace(pool, poolsize, mh, bytes, type)))

        /* No room for metadata?  Can we expand an existing one? */
        for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
                unsigned long nextpage;

                /* We start on this page. */
                nextpage = pool_offset(pool, (char *)(mh+1))/getpagesize();
                /* Iterate through any other pages we own. */
                while (get_page_state(pool, ++nextpage) == TAKEN);

                /* Now, can we grab that page? */
                if (get_page_state(pool, nextpage) != FREE)

                /* OK, expand metadata, do it again. */
                set_page_state(pool, nextpage, TAKEN);
                BUILD_ASSERT(FREE == 0);
                memset((char *)pool + nextpage*getpagesize(), 0, getpagesize());
                return alloc_metaspace(pool, poolsize, mh, bytes, type);

        /* No metadata left at all? */
        page = alloc_get_pages(pool, poolsize, div_up(bytes, getpagesize()), 1);

        newmh = (struct metaheader *)((char *)pool + page * getpagesize());
        BUILD_ASSERT(FREE == 0);
        memset(newmh + 1, 0, getpagesize() - sizeof(*mh));

        /* Sew it into linked list */
        mh = first_mheader(pool,poolsize);
        newmh->next = mh->next;
        mh->next = pool_offset(pool, newmh);

        return alloc_metaspace(pool, poolsize, newmh, bytes, type);

static void alloc_free_pages(void *pool, unsigned long pagenum)
        assert(get_page_state(pool, pagenum) == TAKEN_START);
        set_page_state(pool, pagenum, FREE);
        while (get_page_state(pool, ++pagenum) == TAKEN)
                set_page_state(pool, pagenum, FREE);
static void maybe_transform_uniform_page(void *pool, unsigned long offset)
        /* FIXME: If possible and page isn't full, change to a bitmap */

/* Returns 0, or the size of the uniform alloc to use. */
static unsigned long suitable_for_uc(unsigned long size, unsigned long align)
        unsigned long num_elems, wastage, usize;
        unsigned long bitmap_cost;

        /* Fix up silly alignments. */
        usize = align_up(size, align);

        /* How many can fit in this page? */
        num_elems = SUBPAGE_METAOFF / usize;

        /* Can happen with bigger alignments. */

        /* Usize maxes out at 14 bits. */
        if (usize >= (1 << 14))

        /* How many bytes would be left at the end? */
        wastage = SUBPAGE_METAOFF % usize;

        /* If we can get a larger allocation within alignment constraints, we
         * should do it; otherwise we might as well leave the wastage at the
         * end. */
        usize += align_down(wastage / num_elems, align);

        /* Bitmap allocation costs 2 bits per BITMAP_GRANULARITY bytes, plus
         * however much we waste in rounding up to BITMAP_GRANULARITY. */
        bitmap_cost = 2 * div_up(size, BITMAP_GRANULARITY)
                + CHAR_BIT * (align_up(size, BITMAP_GRANULARITY) - size);

        /* Our cost is 1 bit, plus usize overhead. */
        if (bitmap_cost < 1 + (usize - size) * CHAR_BIT)
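/* Two illustrative cases (assuming a 4096-byte page, so SUBPAGE_METAOFF is
 * 4088): a size-48, align-8 request keeps usize at 48 and costs
 * 2 * div_up(48, 4) = 24 bits as a bitmap versus 1 bit as a uniform page,
 * so it returns 48; a size-20, align-64 request pads usize to 64 and costs
 * only 10 bits as a bitmap versus 1 + 44*8 = 353 bits of per-element waste
 * as a uniform page, so it returns 0. */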
static unsigned long uniform_alloc(void *pool, unsigned long poolsize,
                                   struct uniform_cache *uc,
        uint8_t *metadata = get_page_metadata(pool, uc->page[ucnum]) + 2;
        unsigned long i, max;

        /* Simple one-bit-per-object bitmap. */
        max = SUBPAGE_METAOFF / uc->size[ucnum];
        for (i = 0; i < max; i++) {
                if (!(metadata[i / CHAR_BIT] & (1 << (i % CHAR_BIT)))) {
                        metadata[i / CHAR_BIT] |= (1 << (i % CHAR_BIT));
                        return uc->page[ucnum] * getpagesize()
                                + i * uc->size[ucnum];

static unsigned long new_uniform_page(void *pool, unsigned long poolsize,
        unsigned long page, metalen;

        page = alloc_get_pages(pool, poolsize, 1, 1);

        metalen = uniform_metalen(usize);

        /* Get metadata for page. */
        metadata = new_metadata(pool, poolsize, metalen, UNIFORM);

                alloc_free_pages(pool, page);

        encode_usize(metadata, usize);

        BUILD_ASSERT(FREE == 0);
        memset(metadata + 2, 0, metalen - 2);

        /* Actually, this is a subpage page now. */
        set_page_state(pool, page, SPECIAL);

        /* Set metadata pointer for page. */
        set_page_metadata(pool, page, metadata);
static unsigned long alloc_sub_page(void *pool, unsigned long poolsize,
                                    unsigned long size, unsigned long align)
        unsigned long i, usize;

        struct uniform_cache *uc = pool;

        usize = suitable_for_uc(size, align);

        /* Look for a uniform page. */
        for (i = 0; i < UNIFORM_CACHE_NUM; i++) {
                if (uc->size[i] == usize) {

                        ret = uniform_alloc(pool, poolsize, uc, i);

                        /* OK, that one is full, remove from cache. */

        /* OK, try a new uniform page.  Use random discard for now. */
        i = random() % UNIFORM_CACHE_NUM;
        maybe_transform_uniform_page(pool, uc->page[i]);

        uc->page[i] = new_uniform_page(pool, poolsize, usize);

        return uniform_alloc(pool, poolsize, uc, i);

        /* Look for partial page. */
        for (i = 0; i < poolsize / getpagesize(); i++) {

                if (get_page_state(pool, i) != SPECIAL)

                ret = sub_page_alloc(pool, i, size, align);

        /* Create new SUBPAGE page. */
        i = alloc_get_pages(pool, poolsize, 1, 1);

        /* Get metadata for page. */
        metadata = new_metadata(pool, poolsize, BITMAP_METALEN, BITMAP);

                alloc_free_pages(pool, i);

        /* Actually, this is a subpage page now. */
        set_page_state(pool, i, SPECIAL);

        /* Set metadata pointer for page. */
        set_page_metadata(pool, i, metadata);

        /* Do allocation like normal */
        return sub_page_alloc(pool, i, size, align);
static bool bitmap_page_is_empty(uint8_t *meta)

        /* Skip the header (first bit pair of metadata). */
        for (i = 1; i < SUBPAGE_METAOFF/BITMAP_GRANULARITY+1; i++)
                if (get_bit_pair(meta, i) != FREE)

static bool uniform_page_is_empty(uint8_t *meta)
        unsigned int i, metalen;

        metalen = uniform_metalen(decode_usize(meta));

        /* Skip the header (first two bytes of metadata). */
        for (i = 2; i < metalen + 2; i++) {
                BUILD_ASSERT(FREE == 0);

static bool special_page_is_empty(void *pool, unsigned long page)

        enum sub_metadata_type type;

        meta = get_page_metadata(pool, page);
        type = get_bit_pair(meta, 0);

                return uniform_page_is_empty(meta);

                return bitmap_page_is_empty(meta);

static void clear_special_metadata(void *pool, unsigned long page)

        enum sub_metadata_type type;

        meta = get_page_metadata(pool, page);
        type = get_bit_pair(meta, 0);

                /* First two bytes are the header, rest is already FREE. */
                BUILD_ASSERT(FREE == 0);

                /* First two bits are the header. */
                BUILD_ASSERT(BITMAP_METALEN > 1);
/* Returns true if we cleaned any pages. */
static bool clean_empty_subpages(void *pool, unsigned long poolsize)

        bool progress = false;

        for (i = 0; i < poolsize/getpagesize(); i++) {
                if (get_page_state(pool, i) != SPECIAL)

                if (special_page_is_empty(pool, i)) {
                        clear_special_metadata(pool, i);
                        set_page_state(pool, i, FREE);

/* Returns true if we cleaned any pages. */
static bool clean_metadata(void *pool, unsigned long poolsize)
        struct metaheader *mh, *prev_mh = NULL;
        bool progress = false;

        for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){

                unsigned long metalen = get_metalen(pool, poolsize, mh);

                meta = (uint8_t *)(mh + 1);
                BUILD_ASSERT(FREE == 0);
                for (i = metalen - 1; i > 0; i--)

                /* Completely empty? */
                if (prev_mh && i == metalen) {
                        alloc_free_pages(pool,
                                         pool_offset(pool, mh)/getpagesize());
                        prev_mh->next = mh->next;

                /* Some pages at end are free? */
                for (p = (uint8_t *)(mh+1) + metalen - getpagesize();
                     p -= getpagesize()) {
void *alloc_get(void *pool, unsigned long poolsize,
                unsigned long size, unsigned long align)
        bool subpage_clean = false, metadata_clean = false;

        if (poolsize < MIN_SIZE)

        /* Sub-page allocations have an overhead of ~12%. */
        if (size + size/8 >= getpagesize() || align >= getpagesize()) {
                unsigned long pages = div_up(size, getpagesize());

                ret = alloc_get_pages(pool, poolsize, pages, align)

                ret = alloc_sub_page(pool, poolsize, size, align);

        return (char *)pool + ret;

        /* Allocation failed: garbage collection. */
        if (!subpage_clean) {
                subpage_clean = true;
                if (clean_empty_subpages(pool, poolsize))

        if (!metadata_clean) {
                metadata_clean = true;
                if (clean_metadata(pool, poolsize))

        /* FIXME: Compact metadata? */
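/* To illustrate the cutoff above (assuming a 4096-byte page): a 3640-byte
 * request still goes through alloc_sub_page (3640 + 455 = 4095), while a
 * 3641-byte request, or any request with page-or-larger alignment, gets
 * rounded up to whole pages instead. */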
static void bitmap_free(void *pool, unsigned long pagenum, unsigned long off,

        assert(off % BITMAP_GRANULARITY == 0);

        off /= BITMAP_GRANULARITY;

        /* Offset by one because the first bit pair is used for the header. */

        set_bit_pair(metadata, off++, FREE);
        while (off < SUBPAGE_METAOFF / BITMAP_GRANULARITY
               && get_bit_pair(metadata, off) == TAKEN)
                set_bit_pair(metadata, off++, FREE);

static void uniform_free(void *pool, unsigned long pagenum, unsigned long off,

        unsigned int usize, bit;

        usize = decode_usize(metadata);
        /* Must have been this size. */
        assert(off % usize == 0);

        /* Must have been allocated. */
        assert(metadata[bit / CHAR_BIT] & (1 << (bit % CHAR_BIT)));
        metadata[bit / CHAR_BIT] &= ~(1 << (bit % CHAR_BIT));
static void subpage_free(void *pool, unsigned long pagenum, void *free)
        unsigned long off = (unsigned long)free % getpagesize();
        uint8_t *metadata = get_page_metadata(pool, pagenum);
        enum sub_metadata_type type;

        type = get_bit_pair(metadata, 0);

        assert(off < SUBPAGE_METAOFF);

                bitmap_free(pool, pagenum, off, metadata);

                uniform_free(pool, pagenum, off, metadata);

void alloc_free(void *pool, unsigned long poolsize, void *free)
        unsigned long pagenum;
        struct metaheader *mh;

        assert(poolsize >= MIN_SIZE);

        mh = first_mheader(pool, poolsize);
        assert((char *)free >= (char *)(mh + 1));
        assert((char *)pool + poolsize > (char *)free);

        pagenum = pool_offset(pool, free) / getpagesize();

        if (get_page_state(pool, pagenum) == SPECIAL)
                subpage_free(pool, pagenum, free);

                assert((unsigned long)free % getpagesize() == 0);
                alloc_free_pages(pool, pagenum);
static bool is_metadata_page(void *pool, unsigned long poolsize,

        struct metaheader *mh;

        for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
                unsigned long start, end;

                start = pool_offset(pool, mh);
                end = pool_offset(pool, (char *)(mh+1)
                                  + get_metalen(pool, poolsize, mh));
                if (page >= start/getpagesize() && page < end/getpagesize())

static bool check_bitmap_metadata(void *pool, unsigned long *mhoff)
        enum alloc_state last_state = FREE;

        for (i = 0; i < SUBPAGE_METAOFF / BITMAP_GRANULARITY; i++) {
                enum alloc_state state;

                /* +1 because the header is the first bit pair. */
                state = get_bit_pair((uint8_t *)pool + *mhoff, i+1);

                        if (last_state == FREE)
static bool check_uniform_metadata(void *pool, unsigned long *mhoff)
        uint8_t *meta = (uint8_t *)pool + *mhoff;
        unsigned int i, usize;
        struct uniform_cache *uc = pool;

        usize = decode_usize(meta);
        if (usize == 0 || suitable_for_uc(usize, 1) != usize)

        /* If it's in the uniform cache, make sure that agrees on size. */
        for (i = 0; i < UNIFORM_CACHE_NUM; i++) {

                ucm = get_page_metadata(pool, uc->page[i]);

                if (usize != uc->size[i])

static bool check_subpage(void *pool, unsigned long poolsize,

        unsigned long *mhoff = metadata_off(pool, page);

        if (*mhoff + sizeof(struct metaheader) > poolsize)

        if (*mhoff % ALIGNOF(struct metaheader) != 0)

        /* It must point to a metadata page. */
        if (!is_metadata_page(pool, poolsize, *mhoff / getpagesize()))

        /* Header at start of subpage allocation. */
        switch (get_bit_pair((uint8_t *)pool + *mhoff, 0)) {
                return check_bitmap_metadata(pool, mhoff);

                return check_uniform_metadata(pool, mhoff);
bool alloc_check(void *pool, unsigned long poolsize)

        struct metaheader *mh;
        enum alloc_state last_state = FREE;
        bool was_metadata = false;

        if (poolsize < MIN_SIZE)

        if (get_page_state(pool, 0) != TAKEN_START)

        /* First check metadata pages. */
        /* Metadata pages will be marked TAKEN. */
        for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
                unsigned long start, end;

                start = pool_offset(pool, mh);
                if (start + sizeof(*mh) > poolsize)

                end = pool_offset(pool, (char *)(mh+1)
                                  + get_metalen(pool, poolsize, mh));

                /* Non-first metaheaders should start on a page boundary. */
                if (mh != first_mheader(pool, poolsize)
                    && start % getpagesize() != 0)

                /* It should end on a page boundary. */
                if (end % getpagesize() != 0)

        for (i = 0; i < poolsize / getpagesize(); i++) {
                enum alloc_state state = get_page_state(pool, i);
                bool is_metadata = is_metadata_page(pool, poolsize,i);

                        /* Metadata pages are never free. */

                        /* This should continue a previous block. */
                        if (last_state == FREE)

                        if (is_metadata != was_metadata)

                        /* Check metadata pointer etc. */
                        if (!check_subpage(pool, poolsize, i))

                was_metadata = is_metadata;
void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
        struct metaheader *mh;
        struct uniform_cache *uc = pool;
        unsigned long pagebitlen, metadata_pages, count[1<<BITS_PER_PAGE], tot;

        if (poolsize < MIN_SIZE) {
                fprintf(out, "Pool smaller than %u: no content\n", MIN_SIZE);

        for (i = 0; i < UNIFORM_CACHE_NUM; i++)
                tot += (uc->size[i] != 0);
        fprintf(out, "Uniform cache (%lu entries):\n", tot);
        for (i = 0; i < UNIFORM_CACHE_NUM; i++) {
                unsigned int j, total = 0;

                /* First two bytes are the header. */
                meta = get_page_metadata(pool, uc->page[i]) + 2;

                for (j = 0; j < SUBPAGE_METAOFF / uc->size[i]; j++)
                        if (meta[j / 8] & (1 << (j % 8)))

                fprintf(out, " %u: %u/%u (%u%% density)\n",
                        uc->size[i], total, SUBPAGE_METAOFF / uc->size[i],
                        (total * 100) / (SUBPAGE_METAOFF / uc->size[i]));
        memset(count, 0, sizeof(count));
        for (i = 0; i < poolsize / getpagesize(); i++)
                count[get_page_state(pool, i)]++;

        mh = first_mheader(pool, poolsize);
        pagebitlen = (uint8_t *)mh - get_page_statebits(pool);
        fprintf(out, "%lu bytes of page bits: FREE/TAKEN/TAKEN_START/SUBPAGE = %lu/%lu/%lu/%lu\n",
                pagebitlen, count[0], count[1], count[2], count[3]);

        /* One metadata page for every page of page bits. */
        metadata_pages = div_up(pagebitlen, getpagesize());

        /* Now do each metadata page. */
        for (; mh; mh = next_mheader(pool,mh)) {
                unsigned long free = 0, bitmapblocks = 0, uniformblocks = 0,
                        len = 0, uniformlen = 0, bitmaplen = 0, metalen;
                uint8_t *meta = (uint8_t *)(mh + 1);

                metalen = get_metalen(pool, poolsize, mh);
                metadata_pages += (sizeof(*mh) + metalen) / getpagesize();

                for (i = 0; i < metalen * CHAR_BIT / BITS_PER_PAGE; i += len) {
                        switch (get_bit_pair(meta, i)) {
                                /* Skip over this allocated part. */
                                len = BITMAP_METALEN * CHAR_BIT / BITS_PER_PAGE;
                                /* Skip over this part. */
                                len = decode_usize(meta + i * BITS_PER_PAGE / CHAR_BIT);
                                len = uniform_metalen(len) * CHAR_BIT / BITS_PER_PAGE;

                fprintf(out, "Metadata %lu-%lu: %lu free, %lu bitmapblocks, %lu uniformblocks, %lu%% density\n",
                        pool_offset(pool, mh),
                        pool_offset(pool, (char *)(mh+1) + metalen),
                        free, bitmapblocks, uniformblocks,
                        (bitmaplen + uniformlen) * 100
                        / (free + bitmaplen + uniformlen));
        /* Account for total pages allocated. */
        tot = (count[1] + count[2] - metadata_pages) * getpagesize();

        fprintf(out, "Total metadata bytes = %lu\n",
                metadata_pages * getpagesize());

        /* Now do every subpage. */
        for (i = 0; i < poolsize / getpagesize(); i++) {

                unsigned int j, allocated;
                enum sub_metadata_type type;

                if (get_page_state(pool, i) != SPECIAL)

                memset(count, 0, sizeof(count));

                meta = get_page_metadata(pool, i);
                type = get_bit_pair(meta, 0);

                if (type == BITMAP) {
                        for (j = 0; j < SUBPAGE_METAOFF/BITMAP_GRANULARITY; j++)
                                count[get_page_state(meta, j)]++;
                        allocated = (count[1] + count[2]) * BITMAP_GRANULARITY;
                        fprintf(out, "Subpage bitmap ");

                        unsigned int usize = decode_usize(meta);

                        assert(type == UNIFORM);
                        fprintf(out, "Subpage uniform (%u) ", usize);

                        for (j = 0; j < SUBPAGE_METAOFF / usize; j++)
                                count[!!(meta[j / 8] & (1 << (j % 8)))]++;
                        allocated = count[1] * usize;

                fprintf(out, "%lu: FREE/TAKEN/TAKEN_START = %lu/%lu/%lu %u%% density\n",
                        i, count[0], count[1], count[2],
                        allocated * 100 / getpagesize());

        /* This is optimistic, since we overalloc in several cases. */
        fprintf(out, "Best possible allocation density = %lu%%\n",
                tot * 100 / poolsize);