#include <assert.h>
#include <stdlib.h>
#include "alloc.h"
+#include "bitops.h"
+#include "tiny.h"
#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>
#include <ccan/short_types/short_types.h>
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#if 0 /* Until we have the tiny allocator working, go down to 1 MB */
+#if 0 /* Until we have the tiny allocator working, go down to 64k */
/* We divide the pool into this many large pages (nearest power of 2) */
-#define MAX_PAGES (1024UL)
+#define MAX_LARGE_PAGES (1024UL)
/* 32 small pages == 1 large page. */
#define BITS_FROM_SMALL_TO_LARGE_PAGE 5
#else
-#define MAX_PAGES (128UL)
+#define MAX_LARGE_PAGES (128UL)
#define BITS_FROM_SMALL_TO_LARGE_PAGE 4
#endif
-/* Smallest pool size for this scheme: 512-byte small pages. That's
- * 3/5% overhead for 32/64 bit. */
-#define MIN_USEFUL_SIZE (MAX_PAGES << (9 + BITS_FROM_SMALL_TO_LARGE_PAGE))
+#define MAX_SMALL_PAGES (MAX_LARGE_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)
+
+/* Smallest pool size for this scheme: 128-byte small pages. That's
+ * 9/13% overhead for 32/64 bit. */
+#define MIN_USEFUL_SIZE (MAX_SMALL_PAGES * 128)
/* Every 4 buckets, we jump up a power of 2. ...8 10 12 14 16 20 24 28 32... */
#define INTER_BUCKET_SPACE 4
#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
struct bucket_state {
- unsigned long elements_per_page;
+ u32 elements_per_page;
u16 page_list;
u16 full_list;
};
struct header {
/* Bitmap of which pages are large. */
- unsigned long pagesize[MAX_PAGES / BITS_PER_LONG];
+ unsigned long pagesize[MAX_LARGE_PAGES / BITS_PER_LONG];
/* List of unused small/large pages. */
u16 small_free_list;
/*
 * Header at the start of every (small or large) page.  Pages are
 * chained into the per-bucket lists via 16-bit page numbers.
 */
struct page_header {
	/* Neighbouring pages in this page's list (16-bit page numbers). */
	u16 next, prev;
	/* FIXME: We can just count all-0 and all-1 used[] elements. */
	/* Number of elements currently allocated from this page. */
	unsigned elements_used : 25;
	/* Index of the bucket this page belongs to. */
	unsigned bucket : 7;
	/* Allocation bitmap; the array extends past the declared [1]
	 * (old-style variable-length tail — left as-is because sizeof
	 * computations elsewhere may depend on it). */
	unsigned long used[1]; /* One bit per element. */
};
-/* 2 bit for every byte to allocate. */
-static void tiny_alloc_init(void *pool, unsigned long poolsize)
-{
-/* FIXME */
-}
-
-static void *tiny_alloc_get(void *pool, unsigned long poolsize,
- unsigned long size, unsigned long align)
-{
-/* FIXME */
- return NULL;
-}
-
-static void tiny_alloc_free(void *pool, unsigned long poolsize, void *free)
-{
-/* FIXME */
-}
-
-static unsigned long tiny_alloc_size(void *pool, unsigned long poolsize,
- void *p)
-{
-/* FIXME */
- return 0;
-}
-
-static bool tiny_alloc_check(void *pool, unsigned long poolsize)
-{
-/* FIXME */
- return true;
-}
-
-static unsigned int fls(unsigned long val)
-{
-#if HAVE_BUILTIN_CLZL
- /* This is significantly faster! */
- return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
-#else
- unsigned int r = 32;
-
- if (!val)
- return 0;
- if (!(val & 0xffff0000u)) {
- val <<= 16;
- r -= 16;
- }
- if (!(val & 0xff000000u)) {
- val <<= 8;
- r -= 8;
- }
- if (!(val & 0xf0000000u)) {
- val <<= 4;
- r -= 4;
- }
- if (!(val & 0xc0000000u)) {
- val <<= 2;
- r -= 2;
- }
- if (!(val & 0x80000000u)) {
- val <<= 1;
- r -= 1;
- }
- return r;
-#endif
-}
-
-/* FIXME: Move to bitops. */
-static unsigned int ffsl(unsigned long val)
-{
-#if HAVE_BUILTIN_FFSL
- /* This is significantly faster! */
- return __builtin_ffsl(val);
-#else
- unsigned int r = 1;
-
- if (!val)
- return 0;
- if (sizeof(long) == sizeof(u64)) {
- if (!(val & 0xffffffff)) {
- /* Workaround gcc warning on 32-bit:
- error: right shift count >= width of type */
- u64 tmp = val;
- tmp >>= 32;
- val = tmp;
- r += 32;
- }
- }
- if (!(val & 0xffff)) {
- val >>= 16;
- r += 16;
- }
- if (!(val & 0xff)) {
- val >>= 8;
- r += 8;
- }
- if (!(val & 0xf)) {
- val >>= 4;
- r += 4;
- }
- if (!(val & 3)) {
- val >>= 2;
- r += 2;
- }
- if (!(val & 1)) {
- val >>= 1;
- r += 1;
- }
- return r;
-#endif
-}
-
-static unsigned int popcount(unsigned long val)
-{
-#if HAVE_BUILTIN_POPCOUNTL
- return __builtin_popcountl(val);
-#else
- if (sizeof(long) == sizeof(u64)) {
- u64 v = val;
- v = (v & 0x5555555555555555ULL)
- + ((v >> 1) & 0x5555555555555555ULL);
- v = (v & 0x3333333333333333ULL)
- + ((v >> 1) & 0x3333333333333333ULL);
- v = (v & 0x0F0F0F0F0F0F0F0FULL)
- + ((v >> 1) & 0x0F0F0F0F0F0F0F0FULL);
- v = (v & 0x00FF00FF00FF00FFULL)
- + ((v >> 1) & 0x00FF00FF00FF00FFULL);
- v = (v & 0x0000FFFF0000FFFFULL)
- + ((v >> 1) & 0x0000FFFF0000FFFFULL);
- v = (v & 0x00000000FFFFFFFFULL)
- + ((v >> 1) & 0x00000000FFFFFFFFULL);
- return v;
- }
- val = (val & 0x55555555ULL) + ((val >> 1) & 0x55555555ULL);
- val = (val & 0x33333333ULL) + ((val >> 1) & 0x33333333ULL);
- val = (val & 0x0F0F0F0FULL) + ((val >> 1) & 0x0F0F0F0FULL);
- val = (val & 0x00FF00FFULL) + ((val >> 1) & 0x00FF00FFULL);
- val = (val & 0x0000FFFFULL) + ((val >> 1) & 0x0000FFFFULL);
- return val;
-#endif
-}
-
/*
* Every 4 buckets, the size doubles.
* Between buckets, sizes increase linearly.
+ ((overshoot * INTER_BUCKET_SPACE + (1 << base)-1) >> base);
}
-static unsigned int large_page_bits(unsigned long poolsize)
+static unsigned int small_page_bits(unsigned long poolsize)
{
- return fls(poolsize / MAX_PAGES / 2);
-}
-
-static unsigned long align_up(unsigned long x, unsigned long align)
-{
- return (x + align - 1) & ~(align - 1);
+ return fls(poolsize / MAX_SMALL_PAGES / 2);
}
static struct page_header *from_pgnum(struct header *head,
}
/* We rely on page numbers fitting in 16 bit. */
- BUILD_ASSERT((MAX_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE) < 65536);
+ BUILD_ASSERT(MAX_SMALL_PAGES < 65536);
- lp_bits = large_page_bits(poolsize);
- sp_bits = lp_bits - BITS_FROM_SMALL_TO_LARGE_PAGE;
+ sp_bits = small_page_bits(poolsize);
+ lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
num_buckets = max_bucket(lp_bits);
size = 1;
bucket = size_to_bucket(size);
- if (bucket >= max_bucket(large_page_bits(poolsize))) {
+ sp_bits = small_page_bits(poolsize);
+
+ if (bucket >= max_bucket(sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE)) {
/* FIXME: huge alloc. */
return NULL;
}
bs = &head->bs[bucket];
- sp_bits = large_page_bits(poolsize) - BITS_FROM_SMALL_TO_LARGE_PAGE;
if (!bs->page_list) {
struct page_header *ph;
}
/* Get page header. */
- sp_bits = large_page_bits(poolsize) - BITS_FROM_SMALL_TO_LARGE_PAGE;
+ sp_bits = small_page_bits(poolsize);
pgnum = offset >> sp_bits;
/* Big page? Round down further. */
return tiny_alloc_size(pool, poolsize, p);
/* Get page header. */
- sp_bits = large_page_bits(poolsize) - BITS_FROM_SMALL_TO_LARGE_PAGE;
+ sp_bits = small_page_bits(poolsize);
pgnum = offset >> sp_bits;
/* Big page? Round down further. */
struct page_header *ph;
unsigned long taken, i, prev, pagesize, sp_bits, lp_bits;
- lp_bits = large_page_bits(poolsize);
- sp_bits = lp_bits - BITS_FROM_SMALL_TO_LARGE_PAGE;
+ sp_bits = small_page_bits(poolsize);
+ lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
lp_bucket = large_page_bucket(bindex, sp_bits);
struct header *head = pool;
unsigned long prev, i, lp_bits, sp_bits, header_size, num_buckets;
struct page_header *ph;
- unsigned long pages[(MAX_PAGES << BITS_FROM_SMALL_TO_LARGE_PAGE)
- / BITS_PER_LONG] = { 0 };
+ unsigned long pages[MAX_SMALL_PAGES / BITS_PER_LONG] = { 0 };
if (poolsize < MIN_USEFUL_SIZE)
return tiny_alloc_check(pool, poolsize);
- lp_bits = large_page_bits(poolsize);
- sp_bits = lp_bits - BITS_FROM_SMALL_TO_LARGE_PAGE;
+ sp_bits = small_page_bits(poolsize);
+ lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
num_buckets = max_bucket(lp_bits);
return overhead;
}
-static void tiny_alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
-{
-}
-
void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
{
struct header *head = pool;
return;
}
- lp_bits = large_page_bits(poolsize);
- sp_bits = lp_bits - BITS_FROM_SMALL_TO_LARGE_PAGE;
+ sp_bits = small_page_bits(poolsize);
+ lp_bits = sp_bits + BITS_FROM_SMALL_TO_LARGE_PAGE;
num_buckets = max_bucket(lp_bits);
header_size = sizeof(*head) + sizeof(head->bs) * (num_buckets-1);