11 #include "antithread.h"
12 #include "noerr/noerr.h"
13 #include "talloc/talloc.h"
14 #include "alloc/alloc.h"
16 /* FIXME: Valgrind support should be possible for some cases. Tricky
17 * case is where another process allocates for you, but at worst we
18 * could reset what is valid and what isn't on every entry into the
19 * library or something. */
/* struct at_pool fields (the struct's opening line is outside this
 * excerpt): poolsize is the byte length of the shared mapping;
 * parent_rfd/parent_wfd are the pipe fds back to the parent process,
 * both -1 when this process is not an antithread child (see at_pool()
 * and the at_*_parent() guards below). */
25 unsigned long poolsize;
27 int parent_rfd, parent_wfd;
36 /* FIXME: Better locking through futexes. */
/* Take a blocking advisory write-lock at offset "off" of the pool's
 * backing file.  F_SETLKW sleeps until granted; the retry loop
 * presumably re-issues the call on EINTR (the errno test is on a line
 * not visible in this excerpt) -- any other failure is fatal. */
37 static void lock(int fd, unsigned long off)
42 fl.l_whence = SEEK_SET;
46 while (fcntl(fd, F_SETLKW, &fl) < 0) {
48 err(1, "Failure locking antithread file");
/* Release the advisory lock taken by lock() above.
 * NOTE(review): the F_SETLK return value is ignored, so a failed
 * unlock would go unnoticed -- confirm this is intentional. */
52 static void unlock(int fd, unsigned long off)
58 fl.l_whence = SEEK_SET;
62 fcntl(fd, F_SETLK, &fl);
/* talloc "external" allocator hook: routes talloc allocations for
 * pool-attached contexts through ccan/alloc inside the shared pool.
 * Presumably size == 0 frees (the leading condition is not visible),
 * ptr == NULL allocates, otherwise grow/shrink by hand since alloc has
 * no realloc (see FIXME): shrink in place when the current block is
 * already big enough, else get + copy + free. */
66 static void *at_realloc(const void *parent, void *ptr, size_t size)
68 struct at_pool *p = talloc_find_parent_bytype(parent, struct at_pool);
69 /* FIXME: realloc in ccan/alloc? */
74 alloc_free(p->pool, p->poolsize, ptr);
76 } else if (ptr == NULL) {
77 /* FIXME: Alignment */
/* 16-byte alignment is hard-coded here and in the grow path below. */
78 new = alloc_get(p->pool, p->poolsize, size, 16);
80 if (size <= alloc_size(p->pool, p->poolsize, ptr))
/* Grow: allocate a new block, copy the old contents, free the old. */
83 new = alloc_get(p->pool, p->poolsize, size, 16);
86 alloc_size(p->pool, p->poolsize, ptr));
87 alloc_free(p->pool, p->poolsize, ptr);
95 /* We add 16MB to size. This compensates for address randomization. */
/* at_pool() maps size+PADDING and then remaps the real pool into the
 * middle, leaving PADDING/2 of slack either side so a child's
 * fixed-address map in at_get_pool() can still land at the same
 * address despite ASLR differences between the two processes. */
96 #define PADDING (16 * 1024 * 1024)
98 /* Create a new sharable pool. */
/* Allocates a shared file-backed mapping of at least "size" usable
 * bytes (plus allocator overhead), initializes the ccan/alloc heap in
 * it, and attaches a talloc external context so talloc allocations go
 * into the pool.  Returns the pool; error paths (mostly not visible in
 * this excerpt) presumably return NULL after cleanup. */
99 struct at_pool *at_pool(unsigned long size)
105 /* FIXME: How much should we actually add for overhead?. */
106 size += 32 * getpagesize();
108 /* Round up to whole pages. */
109 size = (size + getpagesize()-1) & ~(getpagesize()-1);
/* The backing file must cover the padded mapping. */
121 if (ftruncate(fd, size + PADDING) != 0)
124 p = talloc(NULL, struct at_pool);
128 /* First map gets a nice big area. */
129 p->pool = mmap(NULL, size+PADDING, PROT_READ|PROT_WRITE, MAP_SHARED, fd,
131 if (p->pool == MAP_FAILED)
134 /* Then we remap into the middle of it. */
/* PADDING/2 of unused address space is left on each side so children
 * can later map at this exact address (see at_get_pool). */
135 munmap(p->pool, size+PADDING);
136 p->pool = mmap(p->pool + PADDING/2, size, PROT_READ|PROT_WRITE,
138 if (p->pool == MAP_FAILED)
141 /* FIXME: Destructor? */
/* This process is nobody's child: mark the parent pipe fds invalid. */
144 p->parent_rfd = p->parent_wfd = -1;
145 alloc_init(p->pool, p->poolsize);
147 p->ctx = talloc_add_external(p, at_realloc);
/* Error-path cleanup (the labels/branches are not visible here). */
154 munmap(p->pool, size);
162 /* Talloc off this to allocate from within the pool. */
/* Accessor; presumably returns atp->ctx (body not visible in this
 * excerpt). */
163 const void *at_pool_ctx(struct at_pool *atp)
/* talloc destructor installed on the child's own athread handle by
 * fork_thread(): a child cannot kill/reap itself, so destruction is
 * refused (return value and rest of the rationale not visible). */
168 static int cant_destroy_self(struct athread *at)
170 /* Perhaps this means we want to detach, but it doesn't really
/* talloc destructor for the parent's athread handle: terminate the
 * child with SIGTERM and reap it so no zombie is left behind. */
176 static int destroy_at(struct athread *at)
178 /* If it is already a zombie, this is harmless. */
179 kill(at->pid, SIGTERM);
184 /* FIXME: Should we do SIGKILL if process doesn't exit soon? */
/* Blocking wait; a mismatched pid (or error) is treated as fatal. */
185 if (waitpid(at->pid, NULL, 0) != at->pid)
186 err(1, "Waiting for athread %p (pid %u)", at, at->pid);
191 /* Sets up thread and forks it. NULL on error. */
/* Common child-creation path shared by _at_run() and at_spawn():
 * creates the pipe pairs (p2c/c2p, creation not visible here), forks,
 * and installs the appropriate destructor on each side.  In the child
 * the pool's parent_rfd/wfd are filled in so the at_*_parent() calls
 * work; in the parent, freeing the handle runs destroy_at(). */
192 static struct athread *fork_thread(struct at_pool *pool)
197 /* You can't already be a child of this pool. */
198 if (pool->parent_rfd != -1)
199 errx(1, "Can't create antithread on this pool: we're one")
201 /* We don't want this allocated *in* the pool. */
202 at = talloc_steal(pool, talloc(NULL, struct athread));
/* Child side: remember the pipe ends leading back to the parent. */
218 pool->parent_rfd = p2c[0];
219 pool->parent_wfd = c2p[1];
220 talloc_set_destructor(at, cant_destroy_self);
/* Parent side: freeing the handle kills and reaps the child. */
227 talloc_set_destructor(at, destroy_at);
242 /* Creating an antithread via fork() */
/* Forks via fork_thread(); the child runs fn(pool, obj), sends the
 * result pointer to the parent, and presumably exits (those lines are
 * not visible here).  The parent returns the athread handle. */
243 struct athread *_at_run(struct at_pool *pool,
244 void *(*fn)(struct at_pool *, void *),
249 at = fork_thread(pool);
/* Child: report fn's return value to the parent (async). */
255 at_tell_parent(pool, fn(pool, obj));
/* Count the entries of a NULL-terminated argv-style vector. */
262 static unsigned int num_args(char *const argv[])
266 for (i = 0; argv[i]; i++);
270 /* Fork and execvp, with added arguments for child to grab. */
/* Like _at_run(), but the child execs cmdline with an extra "AT:..."
 * inserted as argv[1], encoding the pool address/size and the pipe
 * fds; the new program recovers them with at_get_pool().  For this to
 * work the fds must survive exec (presumably not close-on-exec --
 * confirm). */
271 struct athread *at_spawn(struct at_pool *pool, void *arg, char *cmdline[])
276 at = fork_thread(pool);
/* Child: build argv = { cmdline[0], "AT:...", cmdline[1..], NULL }. */
282 char *argv[num_args(cmdline) + 2];
283 argv[0] = cmdline[0];
/* Format must stay in sync with the sscanf in at_get_pool(). */
284 argv[1] = talloc_asprintf(NULL, "AT:%p/%lu/%i/%i/%i/%p",
285 pool->pool, pool->poolsize,
286 pool->fd, pool->parent_rfd,
287 pool->parent_wfd, arg);
288 /* Copy including NULL terminator. */
289 memcpy(&argv[2], &cmdline[1], num_args(cmdline)*sizeof(char *));
290 execvp(argv[0], argv);
/* exec failed: report errno to the parent over the pipe.
 * NOTE(review): the write() return value is ignored here. */
293 write(pool->parent_wfd, &err, sizeof(err));
297 /* Child should always write an error code (or 0). */
298 if (read(at->rfd, &err, sizeof(err)) != sizeof(err)) {
311 /* The fd to poll on */
/* Parent side: fd to select/poll on for messages from this child
 * (presumably returns at->rfd; body not visible). */
312 int at_fd(struct athread *at)
317 /* What's the antithread saying? Blocks if fd not ready. */
/* Reads one pointer-sized message written by the child with
 * at_tell_parent().  The raw pointer value is only meaningful because
 * the pool is mapped at the same address in both processes (see the
 * fixed-address mmap in at_get_pool). */
318 void *at_read(struct athread *at)
322 switch (read(at->rfd, &ret, sizeof(ret))) {
/* read() error is fatal. */
324 err(1, "Reading from athread %p (pid %u)", at, at->pid);
331 /* Should never happen. */
332 err(1, "Short read from athread %p (pid %u)", at, at->pid);
336 /* Say something to a child. */
/* Writes the pointer value itself (not what it points to); a short or
 * failed write is fatal. */
337 void at_tell(struct athread *at, const void *status)
339 if (write(at->wfd, &status, sizeof(status)) != sizeof(status))
340 err(1, "Failure writing to athread %p (pid %u)", at, at->pid);
343 /* For child to grab arguments from command line (removes them) */
/* Counterpart of at_spawn(): parses the "AT:..." argv[1], re-maps the
 * pool at the exact address the parent used, tells the parent we are
 * ready, and strips the magic argument out of argc/argv. */
344 struct at_pool *at_get_pool(int *argc, char *argv[], void **arg)
346 struct at_pool *p = talloc(NULL, struct at_pool);
355 /* If they don't care, use dummy value. */
/* Format must stay in sync with the talloc_asprintf in at_spawn(). */
359 if (sscanf(argv[1], "AT:%p/%lu/%i/%i/%i/%p",
360 &p->pool, &p->poolsize, &p->fd,
361 &p->parent_rfd, &p->parent_wfd, arg) != 6) {
366 /* FIXME: To try to adjust for address space randomization, we
367 * could re-exec a few times. */
/* The mapping must land exactly at the parent's address, or pointers
 * passed through the pool would be wrong; the PADDING slack left by
 * at_pool() makes this likely but not guaranteed. */
368 map = mmap(p->pool, p->poolsize, PROT_READ|PROT_WRITE, MAP_SHARED,
370 if (map != p->pool) {
371 fprintf(stderr, "Mapping %lu bytes @%p gave %p\n",
372 p->poolsize, p->pool, map);
377 p->ctx = talloc_add_external(p, at_realloc);
381 /* Tell parent we're good. */
383 if (write(p->parent_wfd, &err, sizeof(err)) != sizeof(err)) {
/* Remove the AT: argument from argv.
 * NOTE(review): the memmove length is the element count --(*argc),
 * not a byte count; it looks like it should be scaled by
 * sizeof(argv[0]) -- confirm against upstream before relying on
 * argv being intact past the first few entries. */
389 memmove(&argv[1], &argv[2], --(*argc));
393 /* FIXME: cleanup properly. */
398 /* Say something to our parent (async). */
/* Child-side counterpart of at_read(); only valid in a process created
 * through fork_thread()/at_spawn() (parent_wfd != -1). */
399 void at_tell_parent(struct at_pool *pool, const void *status)
401 if (pool->parent_wfd == -1)
402 errx(1, "This process is not an antithread of this pool");
404 if (write(pool->parent_wfd, &status, sizeof(status)) != sizeof(status))
405 err(1, "Failure writing to parent");
408 /* What's the parent saying? Blocks if fd not ready. */
/* Child-side counterpart of at_tell(): blocks reading one pointer-sized
 * message from the parent; read errors and short reads are fatal. */
409 void *at_read_parent(struct at_pool *pool)
413 if (pool->parent_rfd == -1)
414 errx(1, "This process is not an antithread of this pool");
416 switch (read(pool->parent_rfd, &ret, sizeof(ret))) {
418 err(1, "Reading from parent");
425 /* Should never happen. */
426 err(1, "Short read from parent");
430 /* The fd to poll on */
/* Child side: fd to select/poll on for messages from the parent. */
431 int at_parent_fd(struct at_pool *pool)
433 if (pool->parent_rfd == -1)
434 errx(1, "This process is not an antithread of this pool");
436 return pool->parent_rfd;
439 /* FIXME: Futexme. */
/* Lock a pool object across processes: an fcntl byte-range lock on the
 * backing file at the object's offset within the pool.  The
 * talloc_lock_ptr state additionally detects double-locking (the
 * branch using "l" is not fully visible here). */
440 void at_lock(void *obj)
442 struct at_pool *p = talloc_find_parent_bytype(obj, struct at_pool);
446 /* This isn't required yet, but ensures it's a talloc ptr */
447 l = talloc_lock_ptr(obj);
/* The object's offset inside the pool doubles as the lock offset. */
450 lock(p->fd, (char *)obj - (char *)p->pool);
454 errx(1, "Object %p was already locked (something died?)", obj);
/* Release a lock taken by at_lock(); fatal if this process does not
 * hold the object locked. */
459 void at_unlock(void *obj)
461 struct at_pool *p = talloc_find_parent_bytype(obj, struct at_pool);
465 l = talloc_lock_ptr(obj);
467 errx(1, "Object %p was already unlocked", obj);
470 unlock(p->fd, (char *)obj - (char *)p->pool);
/* Whole-pool lock/unlock; bodies are not visible in this excerpt, but
 * presumably they lock/unlock offset 0 or the full file range. */
473 void at_lock_all(struct at_pool *p)
478 void at_unlock_all(struct at_pool *p)