#include "antithread.h"
#include <errno.h>
#include <ccan/alloc/alloc.h>
#include <ccan/list/list.h>
#include <ccan/noerr/noerr.h>
#include <ccan/read_write_all/read_write_all.h>
#include <ccan/talloc/talloc.h>
19 /* FIXME: Valgrind support should be possible for some cases. Tricky
20 * case is where another process allocates for you, but at worst we
21 * could reset what is valid and what isn't on every entry into the
22 * library or something. */
/* Every pool mapped into this process; find_pool() walks this list to
 * locate the pool containing an arbitrary pointer. */
static LIST_HEAD(pools);
26 /* Talloc destroys parents before children (damn Tridge's failing destructors!)
27 * so we need the first child (ie. last-destroyed) to actually clean up. */
struct at_pool_contents {
	/* Node in the global 'pools' list (see find_pool()). */
	struct list_node list;
	/* Size in bytes of the mapped pool region. */
	unsigned long poolsize;
	/* Pipe fds back to our parent process; -1 when this process
	 * created the pool itself (set in at_pool()). */
	int parent_rfd, parent_wfd;
	/* NOTE(review): further members (pool base pointer, backing fd)
	 * are not visible in this excerpt. */
/* NOTE(review): this appears to be a member of the (elided) struct
 * at_pool definition — the public handle pointing at the contents,
 * which are kept separate so they are destroyed last (see above). */
struct at_pool_contents *p;
47 /* FIXME: Better locking through futexes. */
/* Take an exclusive advisory lock on one byte of the pool's backing
 * file, blocking until it is available.  Offset 0 is used as the
 * whole-pool lock; other offsets lock individual objects. */
static void lock(int fd, unsigned long off)
{
	struct flock fl;

	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = off;
	fl.l_len = 1;

	while (fcntl(fd, F_SETLKW, &fl) < 0) {
		/* Retry if interrupted by a signal; anything else is fatal. */
		if (errno != EINTR)
			err(1, "Failure locking antithread file");
	}
}
/* Drop the one-byte advisory lock taken by lock().  The return value
 * is deliberately ignored: unlocking a byte we hold cannot
 * meaningfully fail. */
static void unlock(int fd, unsigned long off)
{
	struct flock fl;

	fl.l_type = F_UNLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = off;
	fl.l_len = 1;

	fcntl(fd, F_SETLK, &fl);
}
77 /* This pointer is in a pool. Find which one. */
static struct at_pool_contents *find_pool(const void *ptr)
	/* Walk every registered pool; return the one whose mapped region
	 * contains ptr.  NOTE(review): the equality check for the
	 * special case below is elided from this excerpt. */
	struct at_pool_contents *p;

	list_for_each(&pools, p, list) {
		/* Special case for initial allocation: ptr *is* pool */
		/* Range test: ptr in [pool, pool + poolsize)? */
		if ((char *)ptr >= (char *)p->pool
		    && (char *)ptr < (char *)p->pool + p->poolsize)
/* talloc destructor for the pool contents: tear down the shared
 * mapping and close the parent pipe.  NOTE(review): other cleanup
 * lines (list removal, closing the other fds) are elided here. */
static int destroy_pool(struct at_pool_contents *p)
	munmap(p->pool, p->poolsize);
	close(p->parent_wfd);
/* talloc "external allocator" hook: implements malloc/free/realloc
 * semantics, multiplexed on (ptr, size), backed by ccan/alloc inside
 * the shared pool that contains 'parent'. */
static void *at_realloc(const void *parent, void *ptr, size_t size)
	struct at_pool_contents *p = find_pool(parent);
	/* FIXME: realloc in ccan/alloc? */
	/* size == 0: behave as free(). */
	alloc_free(p->pool, p->poolsize, ptr);
	} else if (ptr == NULL) {
		/* FIXME: Alignment */
		/* Fresh allocation, 16-byte aligned. */
		new = alloc_get(p->pool, p->poolsize, size, 16);
		/* Shrinking (or equal) realloc: keep the existing block. */
		if (size <= alloc_size(p->pool, p->poolsize, ptr))
		/* Growing realloc: allocate anew, copy old contents, free. */
		new = alloc_get(p->pool, p->poolsize, size, 16);
		       alloc_size(p->pool, p->poolsize, ptr));
		alloc_free(p->pool, p->poolsize, ptr);
/* The pool this process currently holds locked via talloc's lock hook
 * (at most one at a time). */
static struct at_pool_contents *locked;

/* talloc lock hook: lock the pool that contains ptr, recording it in
 * 'locked' for talloc_unlock().  NOTE(review): the lock() call itself
 * is elided from this excerpt. */
static void talloc_lock(const void *ptr)
	struct at_pool_contents *p = find_pool(ptr);
/* talloc unlock hook: release whichever pool talloc_lock() took. */
static void talloc_unlock(void)
	struct at_pool_contents *p = locked;
150 /* We add 16MB to size. This compensates for address randomization. */
151 #define PADDING (16 * 1024 * 1024)
153 /* Create a new sharable pool. */
struct at_pool *at_pool(unsigned long size)
	/* NOTE(review): several lines (locals, file creation, error
	 * paths) are elided from this excerpt; comments cover only what
	 * is visible. */
	struct at_pool_contents *p;

	/* FIXME: How much should we actually add for overhead? */
	size += 32 * getpagesize();

	/* Round up to whole pages. */
	size = (size + getpagesize()-1) & ~(getpagesize()-1);

	/* Size the backing file: pool plus slack for the remap below. */
	if (ftruncate(fd, size + PADDING) != 0)

	atp = talloc(NULL, struct at_pool);

	/* Contents are a separate talloc context so they can be destroyed
	 * last (see the comment above struct at_pool_contents). */
	atp->p = p = talloc(NULL, struct at_pool_contents);

	/* First map gets a nice big area. */
	p->pool = mmap(NULL, size+PADDING, PROT_READ|PROT_WRITE, MAP_SHARED, fd,
	if (p->pool == MAP_FAILED)

	/* Then we remap into the middle of it. */
	munmap(p->pool, size+PADDING);
	p->pool = mmap(p->pool + PADDING/2, size, PROT_READ|PROT_WRITE,
	if (p->pool == MAP_FAILED)

	/* We created the pool, so we have no parent pipes. */
	p->parent_rfd = p->parent_wfd = -1;

	/* Hand the region to the allocator and register the pool. */
	alloc_init(p->pool, p->poolsize);
	list_add(&pools, &p->list);
	talloc_set_destructor(p, destroy_pool);

	/* Route talloc allocations under atp->ctx into the shared pool. */
	atp->ctx = talloc_add_external(atp,
				       at_realloc, talloc_lock, talloc_unlock);
222 /* Talloc off this to allocate from within the pool. */
223 const void *at_pool_ctx(struct at_pool *atp)
/* Destructor installed on the child's own athread handle: unlike
 * destroy_at(), a child must not kill/reap itself on talloc cleanup.
 * NOTE(review): the body is elided from this excerpt. */
static int cant_destroy_self(struct athread *at)
	/* Perhaps this means we want to detach, but it doesn't really
/* talloc destructor for the parent's athread handle: terminate the
 * child process and reap it. */
static int destroy_at(struct athread *at)
	/* If it is already a zombie, this is harmless. */
	kill(at->pid, SIGTERM);

	/* FIXME: Should we do SIGKILL if process doesn't exit soon? */
	if (waitpid(at->pid, NULL, 0) != at->pid)
		err(1, "Waiting for athread %p (pid %u)", at, at->pid);
251 /* Sets up thread and forks it. NULL on error. */
/* Sets up thread and forks it.  NULL on error.
 * NOTE(review): the pipe() creation, fork(), and the split between the
 * child and parent branches are elided from this excerpt. */
static struct athread *fork_thread(struct at_pool *atp)
	struct at_pool_contents *pool = atp->p;

	/* You can't already be a child of this pool. */
	if (pool->parent_rfd != -1)
		errx(1, "Can't create antithread on this pool: we're one");

	/* We don't want this allocated *in* the pool. */
	at = talloc_steal(atp, talloc(NULL, struct athread));

	/* Presumably the child branch: record the pipe ends that lead
	 * back to the parent (p2c = parent-to-child, c2p = child-to-parent
	 * — confirm against the full source). */
	pool->parent_rfd = p2c[0];
	pool->parent_wfd = c2p[1];
	/* The child's handle must not kill/reap on cleanup. */
	talloc_set_destructor(at, cant_destroy_self);

	/* Parent's handle: destructor kills and reaps the child. */
	talloc_set_destructor(at, destroy_at);
303 /* Creating an antithread via fork() */
struct athread *_at_run(struct at_pool *atp,
			void *(*fn)(struct at_pool *, void *),
	/* NOTE(review): the third parameter (obj) and the child/parent
	 * branch logic are elided from this excerpt. */
	at = fork_thread(atp);

	/* In the child: run fn and send its result pointer back to the
	 * parent. */
	at_tell_parent(atp, fn(atp, obj));
/* Count the entries in a NULL-terminated argv-style vector
 * (excluding the terminator). */
static unsigned int num_args(char *const argv[])
{
	unsigned int i;

	for (i = 0; argv[i]; i++);
	return i;
}
331 /* Fork and execvp, with added arguments for child to grab. */
struct athread *at_spawn(struct at_pool *atp, void *arg, char *cmdline[])
	/* NOTE(review): the child/parent branch structure and error
	 * handling are partially elided from this excerpt. */
	at = fork_thread(atp);

		/* Child: build an argv with an extra "AT:..." argument
		 * encoding the pool location and fds; at_get_pool() in the
		 * exec'd program parses and strips it. */
		char *argv[num_args(cmdline) + 2];
		argv[0] = cmdline[0];
		argv[1] = talloc_asprintf(NULL, "AT:%p/%lu/%i/%i/%i/%p",
					  atp->p->pool, atp->p->poolsize,
					  atp->p->fd, atp->p->parent_rfd,
					  atp->p->parent_wfd, arg);
		/* Copy including NULL terminator. */
		memcpy(&argv[2], &cmdline[1], num_args(cmdline)*sizeof(char *));
		execvp(argv[0], argv);

		/* Only reached if exec failed: report the error code to the
		 * parent over the pipe. */
		write_all(atp->p->parent_wfd, &err, sizeof(err));

	/* Child should always write an error code (or 0). */
	if (read(at->rfd, &err, sizeof(err)) != sizeof(err)) {
372 /* The fd to poll on */
373 int at_fd(struct athread *at)
378 /* What's the antithread saying? Blocks if fd not ready. */
379 void *at_read(struct athread *at)
383 switch (read(at->rfd, &ret, sizeof(ret))) {
385 err(1, "Reading from athread %p (pid %u)", at, at->pid);
392 /* Should never happen. */
393 err(1, "Short read from athread %p (pid %u)", at, at->pid);
397 /* Say something to a child. */
398 void at_tell(struct athread *at, const void *status)
400 if (write(at->wfd, &status, sizeof(status)) != sizeof(status))
401 err(1, "Failure writing to athread %p (pid %u)", at, at->pid);
404 /* For child to grab arguments from command line (removes them) */
struct at_pool *at_get_pool(int *argc, char *argv[], void **arg)
	/* NOTE(review): argument validation and most error paths are
	 * elided from this excerpt. */
	struct at_pool *atp = talloc(NULL, struct at_pool);
	struct at_pool_contents *p;

	/* If they don't care, use dummy value. */

	p = atp->p = talloc(atp, struct at_pool_contents);

	/* Parse the "AT:..." argument written by at_spawn(). */
	if (sscanf(argv[1], "AT:%p/%lu/%i/%i/%i/%p",
		   &p->pool, &p->poolsize, &p->fd,
		   &p->parent_rfd, &p->parent_wfd, arg) != 6) {

	/* FIXME: To try to adjust for address space randomization, we
	 * could re-exec a few times. */
	/* Must map at the same address as in the parent, since raw
	 * pointers are exchanged between the processes. */
	map = mmap(p->pool, p->poolsize, PROT_READ|PROT_WRITE, MAP_SHARED,
	if (map != p->pool) {
		fprintf(stderr, "Mapping %lu bytes @%p gave %p\n",
			p->poolsize, p->pool, map);

	/* Register the pool, mirroring at_pool(). */
	list_add(&pools, &p->list);
	talloc_set_destructor(p, destroy_pool);

	atp->ctx = talloc_add_external(atp,
				       at_realloc, talloc_lock, talloc_unlock);

	/* Tell parent we're good. */
	if (write(p->parent_wfd, &err, sizeof(err)) != sizeof(err)) {

	/* Strip the AT: argument from argv.  NOTE(review): the byte count
	 * looks like it should be scaled by sizeof(argv[0]) — confirm
	 * against the full source. */
	memmove(&argv[1], &argv[2], --(*argc));
467 /* Say something to our parent (async). */
468 void at_tell_parent(struct at_pool *atp, const void *status)
470 if (atp->p->parent_wfd == -1)
471 errx(1, "This process is not an antithread of this pool");
473 if (write(atp->p->parent_wfd, &status, sizeof(status))!=sizeof(status))
474 err(1, "Failure writing to parent");
477 /* What's the parent saying? Blocks if fd not ready. */
478 void *at_read_parent(struct at_pool *atp)
482 if (atp->p->parent_rfd == -1)
483 errx(1, "This process is not an antithread of this pool");
485 switch (read(atp->p->parent_rfd, &ret, sizeof(ret))) {
487 err(1, "Reading from parent");
494 /* Should never happen. */
495 err(1, "Short read from parent");
499 /* The fd to poll on */
500 int at_parent_fd(struct at_pool *atp)
502 if (atp->p->parent_rfd == -1)
503 errx(1, "This process is not an antithread of this pool");
505 return atp->p->parent_rfd;
508 /* FIXME: Futexme. */
void at_lock(void *obj)
	/* Lock a single pool object via a one-byte fcntl lock at the
	 * object's offset in the backing file.  NOTE(review): parts of
	 * the lock-flag bookkeeping are elided from this excerpt. */
	struct at_pool *atp = talloc_find_parent_bytype(obj, struct at_pool);

	/* This isn't required yet, but ensures it's a talloc ptr */
	l = talloc_lock_ptr(obj);

	lock(atp->p->fd, (char *)obj - (char *)atp->p->pool);

	/* Presumably guarded by a check on *l (guard elided here). */
	errx(1, "Object %p was already locked (something died?)", obj);
void at_unlock(void *obj)
	/* Release the per-object lock taken by at_lock(). */
	struct at_pool *atp = talloc_find_parent_bytype(obj, struct at_pool);

	l = talloc_lock_ptr(obj);
	/* Presumably guarded by a check on *l (guard elided here). */
		errx(1, "Object %p was already unlocked", obj);

	unlock(atp->p->fd, (char *)obj - (char *)atp->p->pool);
542 void at_lock_all(struct at_pool *atp)
547 void at_unlock_all(struct at_pool *atp)
549 unlock(atp->p->fd, 0);