#include <unistd.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include "alloc.h"
#include "build_assert/build_assert.h"
#include "config.h"
/* FIXME: We assume getpagesize() doesn't change. Remapping file with
 * different pagesize should still work. */
#if HAVE_ALIGNOF
#define ALIGNOF(t) __alignof__(t)
#else
/* Alignment by measuring structure padding. */
#define ALIGNOF(t) (sizeof(struct { char c; t _h; }) - sizeof(t))
#endif
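/* For example, on a typical LP64 ABI the fallback measures
 * ALIGNOF(uint32_t) as sizeof(struct { char c; uint32_t _h; }) == 8
 * minus sizeof(uint32_t) == 4, giving 4: the padding the compiler
 * inserts after 'c' to align '_h' is exactly the type's alignment. */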
/* FIXME: Doesn't handle non-page-aligned poolsize. */
#define MIN_SIZE (getpagesize() * 2)
/* What's the granularity of sub-page allocs? */
#define BITMAP_GRANULARITY 4
/* File layout (note: the uniform cache sits at offset 0, before the
 * page-state bits; see get_page_statebits() below):
 *
 * file := uniform-cache pagestates pad metadata
 * pagestates := pages * 2-bits-per-page
 * pad := pad to next ALIGNOF(metaheader)
 *
 * metadata := metalen next-ptr metabits
 * metabits := freeblock | bitblock | uniformblock
 *
 * freeblock := FREE*
 * bitblock := BITMAP + 2-bits-per-bit-in-page + pad-to-byte
 * uniformblock := UNIFORM + 14-bit-byte-len + bits + pad-to-byte
 */
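/* Worked example (assuming 4096-byte pages and an LP64 ABI): a 1 MB
 * pool has 256 pages.  The uniform cache occupies 16*2 + 16*8 = 160
 * bytes at offset 0, the page states take 256 * 2 bits = 64 bytes,
 * and the first metaheader follows at the next ALIGNOF(struct
 * metaheader) boundary, here offset 224. */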
#define UNIFORM_CACHE_NUM 16
struct uniform_cache
{
	uint16_t size[UNIFORM_CACHE_NUM];
	/* These could be u32 if we're prepared to limit size. */
	unsigned long page[UNIFORM_CACHE_NUM];
};
struct metaheader
{
	/* Next meta header, or 0 */
	unsigned long next;
	/* Bits start here. */
};
/* Assumes a is a power of two. */
static unsigned long align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}
static unsigned long align_down(unsigned long x, unsigned long a)
{
	return x & ~(a - 1);
}
static unsigned long div_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) / a;
}
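/* Worked example of the helpers above, with a = 8:
 *	align_up(13, 8)   == 16
 *	align_down(13, 8) == 8
 *	div_up(13, 8)     == 2
 * align_up/align_down only work because 8 is a power of two. */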
/* It turns out that we spend a lot of time dealing with bit pairs.
 * These routines manipulate them. */
static uint8_t get_bit_pair(const uint8_t *bits, unsigned long index)
{
	return bits[index * 2 / CHAR_BIT] >> (index * 2 % CHAR_BIT) & 3;
}
static void set_bit_pair(uint8_t *bits, unsigned long index, uint8_t val)
{
	bits[index * 2 / CHAR_BIT] &= ~(3 << (index * 2 % CHAR_BIT));
	bits[index * 2 / CHAR_BIT] |= (val << (index * 2 % CHAR_BIT));
}
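/* Example, with CHAR_BIT == 8: pair 5 lives in byte 1 (5*2/8), at bit
 * offset 2 (5*2%8).  So after
 *	uint8_t bits[2] = { 0, 0 };
 *	set_bit_pair(bits, 5, 3);
 * bits[1] == 0x0C, and get_bit_pair(bits, 5) == 3. */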
/* This is used for page states and subpage allocations */
enum alloc_state
{
	FREE,
	TAKEN,
	TAKEN_START,
	SPECIAL, /* Sub-page allocation (in page states). */
};

/* The types for subpage metadata. */
enum sub_metadata_type
{
	/* FREE is same as alloc state */
	BITMAP = 1, /* bitmap allocated page */
	UNIFORM, /* uniform size allocated page */
};
/* Page states are represented by bitpairs, after the uniform cache at
 * the start of the pool. */
103 #define BITS_PER_PAGE 2
105 /* How much metadata info per byte? */
106 #define METADATA_PER_BYTE (CHAR_BIT / 2)
static uint8_t *get_page_statebits(const void *pool)
{
	return (uint8_t *)pool + sizeof(struct uniform_cache);
}
static enum alloc_state get_page_state(const void *pool, unsigned long page)
{
	return get_bit_pair(get_page_statebits(pool), page);
}
static void set_page_state(void *pool, unsigned long page, enum alloc_state s)
{
	set_bit_pair(get_page_statebits(pool), page, s);
}
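/* So, for example, marking a two-page run starting at page 4 is
 *	set_page_state(pool, 4, TAKEN_START);
 *	set_page_state(pool, 5, TAKEN);
 * and get_page_state(pool, 4) then reads back TAKEN_START. */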
/* The offset of metadata for a subpage allocation is found at the end
 * of the page. */
#define SUBPAGE_METAOFF (getpagesize() - sizeof(unsigned long))
/* This is the length of metadata in bits. It consists of two bits
 * for every BITMAP_GRANULARITY of usable bytes in the page, then two
 * bits for the tailer. */
130 #define BITMAP_METABITLEN \
131 ((div_up(SUBPAGE_METAOFF, BITMAP_GRANULARITY) + 1) * BITS_PER_PAGE)
133 /* This is the length in bytes. */
134 #define BITMAP_METALEN (div_up(BITMAP_METABITLEN, CHAR_BIT))
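/* Worked numbers, assuming 4096-byte pages and 8-byte longs:
 * SUBPAGE_METAOFF = 4088, so there are div_up(4088, 4) = 1022 granules
 * plus one tailer entry = 1023 bit pairs.  BITMAP_METABITLEN is then
 * 2046 bits, and BITMAP_METALEN = div_up(2046, 8) = 256 bytes. */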
static struct metaheader *first_mheader(void *pool, unsigned long poolsize)
{
	unsigned int pagestatelen;

	pagestatelen = align_up(div_up(poolsize/getpagesize() * BITS_PER_PAGE,
				       CHAR_BIT),
				ALIGNOF(struct metaheader));
	return (struct metaheader *)(get_page_statebits(pool) + pagestatelen);
}
static struct metaheader *next_mheader(void *pool, struct metaheader *mh)
{
	if (!mh->next)
		return NULL;

	return (struct metaheader *)((char *)pool + mh->next);
}
static unsigned long pool_offset(void *pool, void *p)
{
	return (char *)p - (char *)pool;
}
void alloc_init(void *pool, unsigned long poolsize)
{
	/* FIXME: Alignment assumptions about pool. */
	unsigned long len, i;
	struct metaheader *mh;

	if (poolsize < MIN_SIZE)
		return;

	mh = first_mheader(pool, poolsize);

	/* Mark all page states FREE, all uniform caches zero, and all of
	 * metaheader bitmap which takes rest of first page. */
	len = align_up(pool_offset(pool, mh + 1), getpagesize());
	BUILD_ASSERT(FREE == 0);
	memset(pool, 0, len);

	/* Mark the pagestate and metadata page(s) allocated. */
	set_page_state(pool, 0, TAKEN_START);
	for (i = 1; i < div_up(len, getpagesize()); i++)
		set_page_state(pool, i, TAKEN);
}
182 /* Two bits per element, representing page states. Returns 0 on fail.
183 * off is used to allocate from subpage bitmaps, which use the first 2
184 * bits as the type, so the real bitmap is offset by 1. */
static unsigned long alloc_from_bitmap(uint8_t *bits, unsigned long off,
				       unsigned long elems,
				       unsigned long want, unsigned long align)
{
	long i;
	unsigned long free = 0, j;

	/* We allocate from far end, to increase ability to expand metadata. */
	for (i = elems - 1; i >= 0; i--) {
		switch (get_bit_pair(bits, off+i)) {
		case FREE:
			if (++free >= want) {
				/* They might ask for large alignment. */
				if (align && i % align)
					continue;

				set_bit_pair(bits, off+i, TAKEN_START);
				for (j = i+1; j < i + want; j++)
					set_bit_pair(bits, off+j, TAKEN);
				return off+i;
			}
			break;
		default:
			/* Any non-free element breaks the run. */
			free = 0;
			break;
		}
	}

	return 0;
}
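/* Example: with off == 0, elems == 8 and want == 2, the scan starts at
 * element 7 and walks down; the first two adjacent FREE pairs found
 * (say 6 and 7) become TAKEN_START at 6 and TAKEN at 7, returning 6.
 * Scanning from the top keeps low elements free so the metadata area
 * can grow. */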
static unsigned long alloc_get_pages(void *pool, unsigned long poolsize,
				     unsigned long pages, unsigned long align)
{
	return alloc_from_bitmap(get_page_statebits(pool),
				 0, poolsize / getpagesize(), pages,
				 align / getpagesize());
}
/* Offset to metadata is at end of page. */
static unsigned long *metadata_off(void *pool, unsigned long page)
{
	return (unsigned long *)
		((char *)pool + (page+1)*getpagesize() - sizeof(unsigned long));
}
static uint8_t *get_page_metadata(void *pool, unsigned long page)
{
	return (uint8_t *)pool + *metadata_off(pool, page);
}
static void set_page_metadata(void *pool, unsigned long page, uint8_t *meta)
{
	*metadata_off(pool, page) = meta - (uint8_t *)pool;
}
static unsigned long sub_page_alloc(void *pool, unsigned long page,
				    unsigned long size, unsigned long align)
{
	uint8_t *bits = get_page_metadata(pool, page);
	unsigned long i;
	enum sub_metadata_type type;

	type = get_bit_pair(bits, 0);

	/* If this is a uniform page, we can't allocate from it. */
	if (type == UNIFORM)
		return 0;

	assert(type == BITMAP);

	/* We use a standard bitmap, but offset because of that BITMAP
	 * header. */
	i = alloc_from_bitmap(bits, 1, SUBPAGE_METAOFF/BITMAP_GRANULARITY,
			      div_up(size, BITMAP_GRANULARITY),
			      align / BITMAP_GRANULARITY);

	/* Can't allocate? */
	if (i == 0)
		return 0;

	/* i-1 because of the header. */
	return page*getpagesize() + (i-1)*BITMAP_GRANULARITY;
}
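/* Example: asking for size 10 with align 4 requests div_up(10, 4) == 3
 * granules.  If the bitmap search returns index 5, the allocation
 * covers granules 4-6 of the page (the -1 drops the header slot),
 * i.e. byte offset 16 within the page. */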
/* We look at the page states to figure out where the allocation for this
 * metadata ends. */
static unsigned long get_metalen(void *pool, unsigned long poolsize,
				 struct metaheader *mh)
{
	unsigned long i, first, pages = poolsize / getpagesize();

	first = pool_offset(pool, mh + 1)/getpagesize();

	for (i = first + 1; i < pages && get_page_state(pool,i) == TAKEN; i++);

	return i * getpagesize() - pool_offset(pool, mh + 1);
}
static unsigned int uniform_metalen(unsigned int usize)
{
	unsigned int metalen;

	assert(usize < (1 << 14));

	/* Two bits for the header, 14 bits for size, then one bit for each
	 * element the page can hold. Round up to number of bytes. */
	metalen = div_up(2 + 14 + SUBPAGE_METAOFF / usize, CHAR_BIT);

	/* To ensure metaheader is always aligned, round bytes up. */
	metalen = align_up(metalen, ALIGNOF(struct metaheader));

	return metalen;
}
static unsigned int decode_usize(uint8_t *meta)
{
	return ((unsigned)meta[1] << (CHAR_BIT-2)) | (meta[0] >> 2);
}

static void encode_usize(uint8_t *meta, unsigned int usize)
{
	meta[0] = (UNIFORM | (usize << 2));
	meta[1] = (usize >> (CHAR_BIT - 2));
}
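/* Round-trip example, with CHAR_BIT == 8: encode_usize(meta, 300)
 * stores meta[0] = UNIFORM | ((300 << 2) & 0xff) = 0xb2 and
 * meta[1] = 300 >> 6 = 4; decode_usize then rebuilds
 * (4 << 6) | (0xb2 >> 2) = 256 | 44 = 300.  The low two bits of
 * meta[0] hold the sub_metadata_type, leaving 14 bits of size. */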
static uint8_t *alloc_metaspace(void *pool, unsigned long poolsize,
				struct metaheader *mh, unsigned long bytes,
				enum sub_metadata_type type)
{
	uint8_t *meta = (uint8_t *)(mh + 1);
	unsigned long free = 0, len, i, metalen;

	metalen = get_metalen(pool, poolsize, mh);

	/* Walk through metadata looking for free. */
	for (i = 0; i < metalen * METADATA_PER_BYTE; i += len) {
		switch (get_bit_pair(meta, i)) {
		case FREE:
			len = 1;
			free++;
			if (free == bytes * METADATA_PER_BYTE) {
				/* Mark this as a bitmap. */
				set_bit_pair(meta, i - free + 1, type);
				return meta + (i - free + 1)/METADATA_PER_BYTE;
			}
			break;
		case BITMAP:
			/* Skip over this allocated part. */
			len = BITMAP_METALEN * METADATA_PER_BYTE;
			free = 0;
			break;
		case UNIFORM:
			/* Figure metalen given usize. */
			len = decode_usize(meta + i / METADATA_PER_BYTE);
			len = uniform_metalen(len) * METADATA_PER_BYTE;
			free = 0;
			break;
		default:
			assert(0);
			return NULL;
		}
	}

	return NULL;
}
/* We need this many bytes of metadata. */
static uint8_t *new_metadata(void *pool, unsigned long poolsize,
			     unsigned long bytes, enum sub_metadata_type type)
{
	struct metaheader *mh, *newmh;
	unsigned long page;
	uint8_t *meta;

	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh))
		if ((meta = alloc_metaspace(pool, poolsize, mh, bytes, type)))
			return meta;

	/* No room for metadata? Can we expand an existing one? */
	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		unsigned long nextpage;

		/* We start on this page. */
		nextpage = pool_offset(pool, (char *)(mh+1))/getpagesize();
		/* Iterate through any other pages we own. */
		while (get_page_state(pool, ++nextpage) == TAKEN);

		/* Now, can we grab that page? */
		if (get_page_state(pool, nextpage) != FREE)
			continue;

		/* OK, expand metadata, do it again. */
		set_page_state(pool, nextpage, TAKEN);
		BUILD_ASSERT(FREE == 0);
		memset((char *)pool + nextpage*getpagesize(), 0, getpagesize());
		return alloc_metaspace(pool, poolsize, mh, bytes, type);
	}

	/* No metadata left at all? */
	page = alloc_get_pages(pool, poolsize, div_up(bytes, getpagesize()), 1);
	if (!page)
		return NULL;

	newmh = (struct metaheader *)((char *)pool + page * getpagesize());
	BUILD_ASSERT(FREE == 0);
	memset(newmh + 1, 0, getpagesize() - sizeof(*mh));

	/* Sew it into linked list */
	mh = first_mheader(pool,poolsize);
	newmh->next = mh->next;
	mh->next = pool_offset(pool, newmh);

	return alloc_metaspace(pool, poolsize, newmh, bytes, type);
}
static void alloc_free_pages(void *pool, unsigned long pagenum)
{
	assert(get_page_state(pool, pagenum) == TAKEN_START);
	set_page_state(pool, pagenum, FREE);
	while (get_page_state(pool, ++pagenum) == TAKEN)
		set_page_state(pool, pagenum, FREE);
}
static void maybe_transform_uniform_page(void *pool, unsigned long offset)
{
	/* FIXME: If possible and page isn't full, change to a bitmap */
}
/* Returns 0 or the size of the uniform alloc to use */
static unsigned long suitable_for_uc(unsigned long size, unsigned long align)
{
	unsigned long num_elems, wastage, usize;
	unsigned long bitmap_cost;

	/* A zero-size allocation would divide by zero below. */
	if (size == 0)
		size = 1;

	/* Fix up silly alignments. */
	usize = align_up(size, align);

	/* How many can fit in this page? */
	num_elems = SUBPAGE_METAOFF / usize;

	/* Can happen with bigger alignments. */
	if (!num_elems)
		return 0;

	/* Usize maxes out at 14 bits. */
	if (usize >= (1 << 14))
		return 0;

	/* How many bytes would be left at the end? */
	wastage = SUBPAGE_METAOFF % usize;

	/* If we can get a larger allocation within alignment constraints, we
	 * should do it, otherwise might as well leave wastage at the end. */
	usize += align_down(wastage / num_elems, align);

	/* Bitmap allocation costs 2 bits per BITMAP_GRANULARITY bytes, plus
	 * however much we waste in rounding up to BITMAP_GRANULARITY. */
	bitmap_cost = 2 * div_up(size, BITMAP_GRANULARITY)
		+ CHAR_BIT * (align_up(size, BITMAP_GRANULARITY) - size);

	/* Our cost is 1 bit, plus usize overhead */
	if (bitmap_cost < 1 + (usize - size) * CHAR_BIT)
		return 0;

	return usize;
}
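/* Two examples of this trade-off (4096-byte pages assumed):
 * - size 12, align 4: usize stays 12; the bitmap would cost
 *   2*3 = 6 bits with no rounding waste, the uniform scheme 1 bit,
 *   so it returns 12.
 * - size 2049, align 1: only one element fits, so usize swells to
 *   absorb all 4088 bytes and the uniform wastage dwarfs the
 *   bitmap's ~1050 bits; it returns 0 and the bitmap scheme is used. */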
static unsigned long uniform_alloc(void *pool, unsigned long poolsize,
				   struct uniform_cache *uc,
				   unsigned int ucnum)
{
	uint8_t *metadata = get_page_metadata(pool, uc->page[ucnum]) + 2;
	unsigned long i, max;

	/* Simple one-bit-per-object bitmap. */
	max = SUBPAGE_METAOFF / uc->size[ucnum];
	for (i = 0; i < max; i++) {
		if (!(metadata[i / CHAR_BIT] & (1 << (i % CHAR_BIT)))) {
			metadata[i / CHAR_BIT] |= (1 << (i % CHAR_BIT));
			return uc->page[ucnum] * getpagesize()
				+ i * uc->size[ucnum];
		}
	}

	/* Page is full. */
	return 0;
}
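/* Example: a uniform page of 12-byte objects holds 4088/12 == 340
 * elements; if bit 3 is the first clear bit, the allocation lands at
 * uc->page[ucnum] * getpagesize() + 36. */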
static unsigned long new_uniform_page(void *pool, unsigned long poolsize,
				      unsigned int usize)
{
	unsigned long page, metalen;
	uint8_t *metadata;

	page = alloc_get_pages(pool, poolsize, 1, 1);
	if (page == 0)
		return 0;

	metalen = uniform_metalen(usize);

	/* Get metadata for page. */
	metadata = new_metadata(pool, poolsize, metalen, UNIFORM);
	if (!metadata) {
		alloc_free_pages(pool, page);
		return 0;
	}

	encode_usize(metadata, usize);

	BUILD_ASSERT(FREE == 0);
	memset(metadata + 2, 0, metalen - 2);

	/* Actually, this is a subpage page now. */
	set_page_state(pool, page, SPECIAL);

	/* Set metadata pointer for page. */
	set_page_metadata(pool, page, metadata);

	return page;
}
static unsigned long alloc_sub_page(void *pool, unsigned long poolsize,
				    unsigned long size, unsigned long align)
{
	unsigned long i, usize;
	uint8_t *metadata;
	struct uniform_cache *uc = pool;

	usize = suitable_for_uc(size, align);
	if (usize) {
		/* Look for a uniform page. */
		for (i = 0; i < UNIFORM_CACHE_NUM; i++) {
			if (uc->size[i] == usize) {
				unsigned long ret;

				ret = uniform_alloc(pool, poolsize, uc, i);
				if (ret)
					return ret;
				/* OK, that one is full, remove from cache. */
				uc->size[i] = 0;
				break;
			}
		}

		/* OK, try a new uniform page. Use random discard for now. */
		i = random() % UNIFORM_CACHE_NUM;
		maybe_transform_uniform_page(pool, uc->page[i]);

		uc->page[i] = new_uniform_page(pool, poolsize, usize);
		if (uc->page[i]) {
			uc->size[i] = usize;
			return uniform_alloc(pool, poolsize, uc, i);
		}
		/* Failed: evict the now-stale cache entry. */
		uc->size[i] = 0;
	}

	/* Look for partial page. */
	for (i = 0; i < poolsize / getpagesize(); i++) {
		unsigned long ret;

		if (get_page_state(pool, i) != SPECIAL)
			continue;

		ret = sub_page_alloc(pool, i, size, align);
		if (ret)
			return ret;
	}

	/* Create new SUBPAGE page. */
	i = alloc_get_pages(pool, poolsize, 1, 1);
	if (i == 0)
		return 0;

	/* Get metadata for page. */
	metadata = new_metadata(pool, poolsize, BITMAP_METALEN, BITMAP);
	if (!metadata) {
		alloc_free_pages(pool, i);
		return 0;
	}

	/* Actually, this is a subpage page now. */
	set_page_state(pool, i, SPECIAL);

	/* Set metadata pointer for page. */
	set_page_metadata(pool, i, metadata);

	/* Do allocation like normal */
	return sub_page_alloc(pool, i, size, align);
}
static bool bitmap_page_is_empty(uint8_t *meta)
{
	unsigned int i;

	/* Skip the header (first bit of metadata). */
	for (i = 1; i < SUBPAGE_METAOFF/BITMAP_GRANULARITY+1; i++)
		if (get_bit_pair(meta, i) != FREE)
			return false;

	return true;
}
static bool uniform_page_is_empty(uint8_t *meta)
{
	unsigned int i, metalen;

	metalen = uniform_metalen(decode_usize(meta));

	/* Skip the header (first two bytes of metadata). */
	for (i = 2; i < metalen + 2; i++) {
		BUILD_ASSERT(FREE == 0);
		if (meta[i])
			return false;
	}

	return true;
}
static bool special_page_is_empty(void *pool, unsigned long page)
{
	uint8_t *meta;
	enum sub_metadata_type type;

	meta = get_page_metadata(pool, page);
	type = get_bit_pair(meta, 0);

	if (type == UNIFORM)
		return uniform_page_is_empty(meta);

	assert(type == BITMAP);
	return bitmap_page_is_empty(meta);
}
static void clear_special_metadata(void *pool, unsigned long page)
{
	uint8_t *meta;
	enum sub_metadata_type type;

	meta = get_page_metadata(pool, page);
	type = get_bit_pair(meta, 0);

	if (type == UNIFORM) {
		/* First two bytes are the header, rest is already FREE */
		BUILD_ASSERT(FREE == 0);
		memset(meta, 0, 2);
	} else {
		assert(type == BITMAP);
		/* First two bits is the header. */
		BUILD_ASSERT(BITMAP_METALEN > 1);
		meta[0] = 0;
	}
}
/* Returns true if we cleaned any pages. */
static bool clean_empty_subpages(void *pool, unsigned long poolsize)
{
	unsigned long i;
	bool progress = false;

	for (i = 0; i < poolsize/getpagesize(); i++) {
		if (get_page_state(pool, i) != SPECIAL)
			continue;

		if (special_page_is_empty(pool, i)) {
			clear_special_metadata(pool, i);
			set_page_state(pool, i, FREE);
			progress = true;
		}
	}

	return progress;
}
/* Returns true if we cleaned any pages. */
static bool clean_metadata(void *pool, unsigned long poolsize)
{
	struct metaheader *mh, *prev_mh = NULL;
	bool progress = false;

	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		uint8_t *meta, *p;
		long i;
		unsigned long metalen = get_metalen(pool, poolsize, mh);

		meta = (uint8_t *)(mh + 1);
		BUILD_ASSERT(FREE == 0);
		for (i = metalen - 1; i > 0; i--)
			if (meta[i] != 0)
				break;

		/* Completely empty? */
		if (prev_mh && i == metalen) {
			alloc_free_pages(pool,
					 pool_offset(pool, mh)/getpagesize());
			prev_mh->next = mh->next;
			mh = prev_mh;
			progress = true;
			continue;
		}

		/* Some pages at end are free? */
		for (p = (uint8_t *)(mh+1) + metalen - getpagesize();
		     p > meta + i;
		     p -= getpagesize()) {
			set_page_state(pool,
				       pool_offset(pool, p)/getpagesize(),
				       FREE);
			progress = true;
		}

		prev_mh = mh;
	}

	return progress;
}
void *alloc_get(void *pool, unsigned long poolsize,
		unsigned long size, unsigned long align)
{
	bool subpage_clean = false, metadata_clean = false;
	unsigned long ret;

	if (poolsize < MIN_SIZE)
		return NULL;

again:
	/* Sub-page allocations have an overhead of ~12%. */
	if (size + size/8 >= getpagesize() || align >= getpagesize()) {
		unsigned long pages = div_up(size, getpagesize());

		ret = alloc_get_pages(pool, poolsize, pages, align)
			* getpagesize();
	} else
		ret = alloc_sub_page(pool, poolsize, size, align);

	if (ret != 0)
		return (char *)pool + ret;

	/* Allocation failed: garbage collection. */
	if (!subpage_clean) {
		subpage_clean = true;
		if (clean_empty_subpages(pool, poolsize))
			goto again;
	}

	if (!metadata_clean) {
		metadata_clean = true;
		if (clean_metadata(pool, poolsize))
			goto again;
	}

	/* FIXME: Compact metadata? */
	return NULL;
}
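/* The ~12% cut-over above means that, with 4096-byte pages, requests of
 * 3641 bytes and up (or any request with page alignment) take whole
 * pages; smaller ones go through the sub-page allocators. */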
static void bitmap_free(void *pool, unsigned long pagenum, unsigned long off,
			uint8_t *metadata)
{
	assert(off % BITMAP_GRANULARITY == 0);

	off /= BITMAP_GRANULARITY;

	/* Offset by one because first bit is used for header. */
	off++;

	set_bit_pair(metadata, off++, FREE);
	while (off < SUBPAGE_METAOFF / BITMAP_GRANULARITY
	       && get_bit_pair(metadata, off) == TAKEN)
		set_bit_pair(metadata, off++, FREE);
}
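/* Example: freeing byte offset 16 of a bitmap page clears pair 5
 * (16/4, plus one for the header) and then any following TAKEN pairs,
 * stopping at the next TAKEN_START or FREE pair. */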
static void uniform_free(void *pool, unsigned long pagenum, unsigned long off,
			 uint8_t *metadata)
{
	unsigned int usize, bit;

	usize = decode_usize(metadata);
	/* Must have been this size. */
	assert(off % usize == 0);
	bit = off / usize;

	/* Skip header. */
	metadata += 2;

	/* Must have been allocated. */
	assert(metadata[bit / CHAR_BIT] & (1 << (bit % CHAR_BIT)));
	metadata[bit / CHAR_BIT] &= ~(1 << (bit % CHAR_BIT));
}
static void subpage_free(void *pool, unsigned long pagenum, void *free)
{
	unsigned long off = (unsigned long)free % getpagesize();
	uint8_t *metadata = get_page_metadata(pool, pagenum);
	enum sub_metadata_type type;

	type = get_bit_pair(metadata, 0);

	assert(off < SUBPAGE_METAOFF);

	if (type == BITMAP)
		bitmap_free(pool, pagenum, off, metadata);
	else {
		assert(type == UNIFORM);
		uniform_free(pool, pagenum, off, metadata);
	}
}
void alloc_free(void *pool, unsigned long poolsize, void *free)
{
	unsigned long pagenum;
	struct metaheader *mh;

	if (!free)
		return;

	assert(poolsize >= MIN_SIZE);

	mh = first_mheader(pool, poolsize);
	assert((char *)free >= (char *)(mh + 1));
	assert((char *)pool + poolsize > (char *)free);

	pagenum = pool_offset(pool, free) / getpagesize();

	if (get_page_state(pool, pagenum) == SPECIAL)
		subpage_free(pool, pagenum, free);
	else {
		assert((unsigned long)free % getpagesize() == 0);
		alloc_free_pages(pool, pagenum);
	}
}
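#if 0
/* Illustrative usage sketch of the public API above (not compiled in):
 * the mmap()-backed pool and this error handling are assumptions for
 * the example, not part of this file. */
#include <sys/mman.h>

static int alloc_example(void)
{
	unsigned long poolsize = 16 * getpagesize();
	void *pool = mmap(NULL, poolsize, PROT_READ|PROT_WRITE,
			  MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	char *p;

	if (pool == MAP_FAILED)
		return 1;

	alloc_init(pool, poolsize);
	p = alloc_get(pool, poolsize, 100, ALIGNOF(unsigned long));
	if (!p)
		return 1;
	memcpy(p, "hello", 6);
	alloc_free(pool, poolsize, p);
	return !alloc_check(pool, poolsize);
}
#endif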
static bool is_metadata_page(void *pool, unsigned long poolsize,
			     unsigned long page)
{
	struct metaheader *mh;

	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		unsigned long start, end;

		start = pool_offset(pool, mh);
		end = pool_offset(pool, (char *)(mh+1)
				  + get_metalen(pool, poolsize, mh));
		if (page >= start/getpagesize() && page < end/getpagesize())
			return true;
	}

	return false;
}
static bool check_bitmap_metadata(void *pool, unsigned long *mhoff)
{
	enum alloc_state last_state = FREE;
	unsigned int i;

	for (i = 0; i < SUBPAGE_METAOFF / BITMAP_GRANULARITY; i++) {
		enum alloc_state state;

		/* +1 because header is the first entry. */
		state = get_bit_pair((uint8_t *)pool + *mhoff, i+1);
		switch (state) {
		case SPECIAL:
			return false;
		case TAKEN:
			if (last_state == FREE)
				return false;
			break;
		default:
			break;
		}
		last_state = state;
	}

	return true;
}
static bool check_uniform_metadata(void *pool, unsigned long *mhoff)
{
	uint8_t *meta = (uint8_t *)pool + *mhoff;
	unsigned int i, usize;
	struct uniform_cache *uc = pool;

	usize = decode_usize(meta);
	if (usize == 0 || suitable_for_uc(usize, 1) != usize)
		return false;

	/* If it's in uniform cache, make sure that agrees on size. */
	for (i = 0; i < UNIFORM_CACHE_NUM; i++) {
		uint8_t *ucm;

		if (!uc->size[i])
			continue;

		ucm = get_page_metadata(pool, uc->page[i]);
		if (ucm != meta)
			continue;

		if (usize != uc->size[i])
			return false;
	}

	return true;
}
static bool check_subpage(void *pool, unsigned long poolsize,
			  unsigned long page)
{
	unsigned long *mhoff = metadata_off(pool, page);

	if (*mhoff + sizeof(struct metaheader) > poolsize)
		return false;

	if (*mhoff % ALIGNOF(struct metaheader) != 0)
		return false;

	/* It must point to a metadata page. */
	if (!is_metadata_page(pool, poolsize, *mhoff / getpagesize()))
		return false;

	/* Header at start of subpage allocation */
	switch (get_bit_pair((uint8_t *)pool + *mhoff, 0)) {
	case BITMAP:
		return check_bitmap_metadata(pool, mhoff);
	case UNIFORM:
		return check_uniform_metadata(pool, mhoff);
	default:
		return false;
	}
}
bool alloc_check(void *pool, unsigned long poolsize)
{
	unsigned long i;
	struct metaheader *mh;
	enum alloc_state last_state = FREE;
	bool was_metadata = false;

	if (poolsize < MIN_SIZE)
		return true;

	if (get_page_state(pool, 0) != TAKEN_START)
		return false;

	/* First check metadata pages. */
	/* Metadata pages will be marked TAKEN. */
	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		unsigned long start, end;

		start = pool_offset(pool, mh);
		if (start + sizeof(*mh) > poolsize)
			return false;

		end = pool_offset(pool, (char *)(mh+1)
				  + get_metalen(pool, poolsize, mh));
		if (end > poolsize)
			return false;

		/* Non-first pages should start on a page boundary. */
		if (mh != first_mheader(pool, poolsize)
		    && start % getpagesize() != 0)
			return false;

		/* It should end on a page boundary. */
		if (end % getpagesize() != 0)
			return false;
	}

	for (i = 0; i < poolsize / getpagesize(); i++) {
		enum alloc_state state = get_page_state(pool, i);
		bool is_metadata = is_metadata_page(pool, poolsize,i);

		switch (state) {
		case FREE:
			/* metadata pages are never free. */
			if (is_metadata)
				return false;
			break;
		case TAKEN_START:
			break;
		case TAKEN:
			/* This should continue a previous block. */
			if (last_state == FREE)
				return false;
			if (is_metadata != was_metadata)
				return false;
			break;
		case SPECIAL:
			/* Check metadata pointer etc. */
			if (!check_subpage(pool, poolsize, i))
				return false;
			break;
		}
		last_state = state;
		was_metadata = is_metadata;
	}

	return true;
}
void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
{
	struct metaheader *mh;
	struct uniform_cache *uc = pool;
	unsigned long pagebitlen, metadata_pages, count[1<<BITS_PER_PAGE], tot;
	unsigned long i;

	if (poolsize < MIN_SIZE) {
		fprintf(out, "Pool smaller than %u: no content\n", MIN_SIZE);
		return;
	}

	tot = 0;
	for (i = 0; i < UNIFORM_CACHE_NUM; i++)
		tot += (uc->size[i] != 0);
	fprintf(out, "Uniform cache (%lu entries):\n", tot);
	for (i = 0; i < UNIFORM_CACHE_NUM; i++) {
		unsigned int j, total = 0;
		uint8_t *meta;

		if (!uc->size[i])
			continue;

		/* First two bytes are header. */
		meta = get_page_metadata(pool, uc->page[i]) + 2;

		for (j = 0; j < SUBPAGE_METAOFF / uc->size[i]; j++)
			if (meta[j / 8] & (1 << (j % 8)))
				total++;

		fprintf(out, "  %u: %u/%u (%u%% density)\n",
			uc->size[i], total, SUBPAGE_METAOFF / uc->size[i],
			(total * 100) / (SUBPAGE_METAOFF / uc->size[i]));
	}

	memset(count, 0, sizeof(count));
	for (i = 0; i < poolsize / getpagesize(); i++)
		count[get_page_state(pool, i)]++;

	mh = first_mheader(pool, poolsize);
	pagebitlen = (uint8_t *)mh - get_page_statebits(pool);
	fprintf(out, "%lu bytes of page bits: FREE/TAKEN/TAKEN_START/SUBPAGE = %lu/%lu/%lu/%lu\n",
		pagebitlen, count[0], count[1], count[2], count[3]);

	/* One metadata page for every page of page bits. */
	metadata_pages = div_up(pagebitlen, getpagesize());

	/* Now do each metadata page. */
	for (; mh; mh = next_mheader(pool,mh)) {
		unsigned long free = 0, bitmapblocks = 0, uniformblocks = 0,
			len = 0, uniformlen = 0, bitmaplen = 0, metalen;
		uint8_t *meta = (uint8_t *)(mh + 1);

		metalen = get_metalen(pool, poolsize, mh);
		metadata_pages += (sizeof(*mh) + metalen) / getpagesize();

		for (i = 0; i < metalen * METADATA_PER_BYTE; i += len) {
			switch (get_bit_pair(meta, i)) {
			case FREE:
				len = 1;
				free++;
				break;
			case BITMAP:
				/* Skip over this allocated part. */
				len = BITMAP_METALEN * METADATA_PER_BYTE;
				bitmapblocks++;
				bitmaplen += len;
				break;
			case UNIFORM:
				/* Skip over this part. */
				len = decode_usize(meta + i/METADATA_PER_BYTE);
				len = uniform_metalen(len) * METADATA_PER_BYTE;
				uniformblocks++;
				uniformlen += len;
				break;
			default:
				assert(0);
			}
		}

		fprintf(out, "Metadata %lu-%lu: %lu free, %lu bitmapblocks, %lu uniformblocks, %lu%% density\n",
			pool_offset(pool, mh),
			pool_offset(pool, (char *)(mh+1) + metalen),
			free, bitmapblocks, uniformblocks,
			(bitmaplen + uniformlen) * 100
			/ (free + bitmaplen + uniformlen));
	}

	/* Account for total pages allocated. */
	tot = (count[1] + count[2] - metadata_pages) * getpagesize();

	fprintf(out, "Total metadata bytes = %lu\n",
		metadata_pages * getpagesize());

	/* Now do every subpage. */
	for (i = 0; i < poolsize / getpagesize(); i++) {
		uint8_t *meta;
		unsigned int j, allocated;
		enum sub_metadata_type type;

		if (get_page_state(pool, i) != SPECIAL)
			continue;

		memset(count, 0, sizeof(count));

		meta = get_page_metadata(pool, i);
		type = get_bit_pair(meta, 0);

		if (type == BITMAP) {
			for (j = 0; j < SUBPAGE_METAOFF/BITMAP_GRANULARITY; j++)
				count[get_bit_pair(meta, j)]++;
			allocated = (count[1] + count[2]) * BITMAP_GRANULARITY;
			fprintf(out, "Subpage bitmap ");
		} else {
			unsigned int usize = decode_usize(meta);

			assert(type == UNIFORM);
			fprintf(out, "Subpage uniform (%u) ", usize);
			meta += 2;
			for (j = 0; j < SUBPAGE_METAOFF / usize; j++)
				count[!!(meta[j / 8] & (1 << (j % 8)))]++;
			allocated = count[1] * usize;
		}
		fprintf(out, "%lu: FREE/TAKEN/TAKEN_START = %lu/%lu/%lu %u%% density\n",
			i, count[0], count[1], count[2],
			allocated * 100 / getpagesize());
	}

	/* This is optimistic, since we overalloc in several cases. */
	fprintf(out, "Best possible allocation density = %lu%%\n",
		tot * 100 / poolsize);
}