--- /dev/null
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+
+/**
+ * block_pool - An efficient allocator for blocks that don't need to be
+ * resized or freed.
+ *
+ * block_pool allocates blocks by packing them into buffers, making the
+ * overhead per block virtually zero. Because of this, you cannot resize or
+ * free individual blocks, but you can free the entire block_pool.
+ *
+ * The rationale behind block_pool is that talloc uses a lot of bytes per
+ * block (48 on 32-bit, 80 on 64-bit), which adds up quickly when many tiny
+ * blocks are needed. talloc remains an excellent general-purpose allocator,
+ * and because a block_pool is itself a talloc context, it slots neatly into
+ * talloc-based applications that need to allocate many small blocks with
+ * minimal overhead.
+ *
+ * Example:
+ *
+ * #include <ccan/block_pool/block_pool.h>
+ *
+ * int main(void) {
+ * struct block_pool *bp = block_pool_new(NULL);
+ *
+ * void *buffer = block_pool_alloc(bp, 4096);
+ * char *string = block_pool_strdup(bp, "A string");
+ *
+ * int array[] = {0,1,1,2,3,5,8,13,21,34};
+ * int *array_copy = block_pool_memdup(bp, array, sizeof(array));
+ *
+ * block_pool_free(bp);
+ * return 0;
+ * }
+ *
+ * Author: Joey Adams
+ * License: BSD
+ */
+int main(int argc, char *argv[])
+{
+ /* Expect exactly one argument */
+ if (argc != 2)
+ return 1;
+
+ if (strcmp(argv[1], "depends") == 0) {
+ printf("ccan/talloc\n");
+ return 0;
+ }
+
+ return 1;
+}
--- /dev/null
+/*
+ Copyright (c) 2009 Joseph A. Adams
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "block_pool.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+//must be a power of 2
+#define BLOCK_SIZE 4096
+
+struct block {
+ size_t remaining;
+ size_t size;
+ char *data;
+};
+
+struct block_pool {
+ size_t count;
+ size_t alloc; //2^n - 1, where n is an integer > 1
+ struct block *block;
+
+ //blocks are arranged in a max-heap by the .remaining field
+ // (except the root block does not percolate down until it is filled)
+};
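+
+//allocation strategy (overview): each request is first tried against the
+// root block; if the root can't hold it, the root is percolated down so the
+// block with the most remaining space surfaces, and that block is tried.
+// If even that fails, a fresh block is appended and percolated up into
+// place. See block_pool_alloc_align().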
+
+static int destructor(struct block_pool *bp) {
+ struct block *block = bp->block;
+ size_t d = bp->count;
+
+ for (;d--;block++)
+ free(block->data);
+ free(bp->block);
+
+ return 0;
+}
+
+struct block_pool *block_pool_new(void *ctx) {
+ struct block_pool *bp = talloc(ctx, struct block_pool);
+ talloc_set_destructor(bp, destructor);
+
+ bp->count = 0;
+ bp->alloc = 7;
+ bp->block = malloc(bp->alloc * sizeof(struct block));
+
+ return bp;
+}
+
+static void *new_block(struct block *b, size_t needed) {
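+ //round the size up to the next multiple of BLOCK_SIZE (a power of 2)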
+ b->size = (needed+(BLOCK_SIZE-1)) & ~(BLOCK_SIZE-1);
+ b->remaining = b->size - needed;
+ b->data = malloc(b->size);
+ return b->data;
+}
+
+//for the first block, keep the memory usage low in case it's the only block.
+static void *new_block_tiny(struct block *b, size_t needed) {
+ if (needed < 256)
+ b->size = 256;
+ else
+ b->size = (needed+(BLOCK_SIZE-1)) & ~(BLOCK_SIZE-1);
+ b->remaining = b->size - needed;
+ b->data = malloc(b->size);
+ return b->data;
+}
+
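+//note: 'align' here is a mask (the requested alignment minus one), as
+// prepared by block_pool_alloc_align()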
+static void *try_block(struct block *b, size_t size, size_t align) {
+ size_t offset = b->size - b->remaining;
+ offset = (offset+align) & ~align;
+
+ if (b->size-offset >= size) {
+ //good, we can use this block
+ void *ret = b->data + offset;
+ b->remaining = b->size-offset-size;
+
+ return ret;
+ }
+
+ return NULL;
+}
+
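+//array-based max-heap helpers: L/R/P give the left child, right child, and
+// parent of 'node'; V is the heap key (a block's remaining space)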
+#define L(node) (node+node+1)
+#define R(node) (node+node+2)
+#define P(node) ((node-1)>>1)
+
+#define V(node) (bp->block[node].remaining)
+
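+//sink 'node' down the heap until no child has more remaining space,
+// restoring the max-heap property below it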
+static void percolate_down(struct block_pool *bp, size_t node) {
+ size_t child = L(node);
+ struct block tmp;
+
+ //get the maximum child
+ if (child >= bp->count)
+ return;
+ if (child+1 < bp->count && V(child+1) > V(child))
+ child++;
+
+ if (V(child) <= V(node))
+ return;
+
+ tmp = bp->block[node];
+ bp->block[node] = bp->block[child];
+ bp->block[child] = tmp;
+
+ percolate_down(bp, child);
+}
+
+//note: percolates up only as far as node 1 or 2, never into node 0, because
+// the root block is treated separately until it has been filled
+static void percolate_up(struct block_pool *bp, size_t node) {
+ size_t parent = P(node);
+ struct block tmp;
+
+ if (node<3 || V(parent) >= V(node))
+ return;
+
+ tmp = bp->block[node];
+ bp->block[node] = bp->block[parent];
+ bp->block[parent] = tmp;
+
+ percolate_up(bp, parent);
+}
+
+void *block_pool_alloc_align(struct block_pool *bp, size_t size, size_t align) {
+ void *ret;
+
+ if (align)
+ align--;
+
+ //if there aren't any blocks, make a new one
+ if (!bp->count) {
+ bp->count = 1;
+ return new_block_tiny(bp->block, size);
+ }
+
+ //try the root block
+ ret = try_block(bp->block, size, align);
+ if (ret)
+ return ret;
+
+ //root block is filled, percolate down and try the biggest one
+ percolate_down(bp, 0);
+ ret = try_block(bp->block, size, align);
+ if (ret)
+ return ret;
+
+ //the biggest wasn't big enough; we need a new block
+ if (bp->count >= bp->alloc) {
+ //make room for another block
+ bp->alloc += bp->alloc;
+ bp->alloc++;
+ bp->block = realloc(bp->block, bp->alloc * sizeof(struct block));
+ }
+ ret = new_block(bp->block+(bp->count++), size);
+
+ //fix the heap after adding the new block
+ percolate_up(bp, bp->count-1);
+
+ return ret;
+}
+
+#undef L
+#undef R
+#undef P
+#undef V
+
+char *block_pool_strdup(struct block_pool *bp, const char *str) {
+ size_t size = strlen(str)+1;
+ char *ret = block_pool_alloc_align(bp, size, 1);
+
+ memcpy(ret, str, size);
+ return ret;
+}
--- /dev/null
+/*
+ Copyright (c) 2009 Joseph A. Adams
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ 3. The name of the author may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CCAN_BLOCK_POOL
+#define CCAN_BLOCK_POOL
+
+#include <ccan/talloc/talloc.h>
+#include <string.h>
+
+struct block_pool;
+
+/* Construct a new block pool.
+ ctx is a talloc context (or NULL if you don't know what talloc is ;) ) */
+struct block_pool *block_pool_new(void *ctx);
+
+/* Same as block_pool_alloc, but allows you to manually specify alignment.
+ For instance, strings need not be aligned, so set align=1 for them.
+ align must be a power of two. */
+void *block_pool_alloc_align(struct block_pool *bp, size_t size, size_t align);
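+
+/* Example (illustrative): an array of doubles could be requested with
+ double *d = block_pool_alloc_align(bp, n * sizeof(double), 8);
+ which packs the array into the pool at an 8-byte boundary. */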
+
+/* Allocate a block of a given size. The returned pointer will remain valid
+ for the life of the block_pool. The block cannot be resized or
+ freed individually. */
+static inline void *block_pool_alloc(struct block_pool *bp, size_t size) {
+ size_t align = size & -size; //greatest power of two by which size is divisible
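+ // e.g. size 24 -> align 8, size 100 -> align 4, size 4096 -> capped to 16 below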
+ if (align > 16)
+ align = 16;
+ return block_pool_alloc_align(bp, size, align);
+}
+
+static inline void block_pool_free(struct block_pool *bp) {
+ talloc_free(bp);
+}
+
+
+char *block_pool_strdup(struct block_pool *bp, const char *str);
+
+static inline void *block_pool_memdup(struct block_pool *bp, const void *src, size_t size) {
+ void *ret = block_pool_alloc(bp, size);
+ memcpy(ret, src, size);
+ return ret;
+}
+
+#endif
--- /dev/null
+#include "block_pool/block_pool.h"
+#include "block_pool/block_pool.c"
+#include "tap/tap.h"
+
+struct alloc_record {
+ size_t size;
+ char *ptr;
+};
+
+static int compar_alloc_record_by_ptr(const void *ap, const void *bp) {
+ const struct alloc_record *a=ap, *b=bp;
+
+ if (a->ptr < b->ptr)
+ return -1;
+ else if (a->ptr > b->ptr)
+ return 1;
+ else
+ return 0;
+}
+
+static size_t random_block_size(void) {
+ int scale = random() % 11;
+ switch (scale) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4: return random() % 25;
+ case 5:
+ case 6:
+ case 7: return random() % 100;
+ case 8:
+ case 9: return random() % 1000;
+ case 10: return random() % 10000;
+ default:
+ fprintf(stderr, "random() %% 11 returned %d somehow!\n", scale);
+ exit(EXIT_FAILURE);
+ }
+}
+
+#define L(node) (node+node+1)
+#define R(node) (node+node+2)
+#define P(node) ((node-1)>>1)
+
+#define V(node) (bp->block[node].remaining)
+
+//used by test_block_pool to make sure the pool's block array is a max heap
+//set node=0 to scan the whole heap (starting at the root)
+//returns nonzero on success
+static int check_heap(struct block_pool *bp, size_t node) {
+ if (node < bp->count) {
+ if (node) { //the root node need not be the max, but its subtrees must be valid
+ if (L(node) < bp->count && V(L(node)) > V(node))
+ return 0;
+ if (R(node) < bp->count && V(R(node)) > V(node))
+ return 0;
+ }
+ return check_heap(bp, L(node)) && check_heap(bp, R(node));
+ } else
+ return 1;
+}
+
+#undef L
+#undef R
+#undef P
+#undef V
+
+/* Performs a self-test of block_pool.
+ Returns 1 on success, 0 on failure.
+ If verify_heap is nonzero, the test will check the heap structure every
+ single allocation, making test_block_pool take n^2 time. */
+static int test_block_pool(size_t blocks_to_try, FILE *out, int verify_heap) {
+ struct block_pool *bp = block_pool_new(NULL);
+ struct alloc_record *record = malloc(sizeof(*record) * blocks_to_try);
+ size_t i;
+ size_t bytes_allocated = 0;
+ #define print(...) do { \
+ if (out) \
+ fprintf(out, __VA_ARGS__); \
+ } while(0)
+
+ print("Allocating %zu blocks...\n", blocks_to_try);
+
+ for (i=0; i<blocks_to_try; i++) {
+ record[i].size = random_block_size();
+ record[i].ptr = block_pool_alloc(bp, record[i].size);
+
+ bytes_allocated += record[i].size;
+
+ memset(record[i].ptr, 0x55, record[i].size);
+
+ if (verify_heap && !check_heap(bp, 0)) {
+ print("Block pool's max-heap is wrong (allocation %zu)\n", i);
+ return 0;
+ }
+ }
+
+ print("Finished allocating\n"
+ " %zu blocks\n"
+ " %zu bytes\n"
+ " %zu pool buffers\n",
+ blocks_to_try, bytes_allocated, bp->count);
+
+ qsort(record, blocks_to_try,
+ sizeof(*record), compar_alloc_record_by_ptr);
+
+ print("Making sure block ranges are unique...\n");
+ //print("0: %p ... %p\n", record[0].ptr, record[0].ptr+record[0].size);
+ for (i=1; i<blocks_to_try; i++) {
+ struct alloc_record *a = &record[i-1];
+ struct alloc_record *b = &record[i];
+
+ //print("%zu: %p ... %p\n", i, b->ptr, b->ptr+b->size);
+
+ if (a->ptr > b->ptr) {
+ struct alloc_record *tmp = a;
+ a = b;
+ b = tmp;
+ }
+
+ if (a->ptr <= b->ptr && a->ptr+a->size <= b->ptr)
+ continue;
+
+ print("Allocations %zu and %zu overlap\n", i-1, i);
+ return 0;
+ }
+
+ print("Checking heap structure...\n");
+ if (!check_heap(bp, 0)) {
+ print("Block pool's max-heap is wrong\n");
+ return 0;
+ }
+
+ block_pool_free(bp);
+ free(record);
+
+ return 1;
+
+ #undef print
+}
+
+
+int main(void)
+{
+ plan_tests(1);
+
+ //test a few blocks with heap verification
+ //test a batch of blocks with heap verification after every allocation
+
+ return exit_status();
+}