1 /* Licensed under GPLv3+ - see LICENSE file for details */
13 #include "antithread.h"
14 #include <ccan/err/err.h>
15 #include <ccan/noerr/noerr.h>
16 #include <ccan/talloc/talloc.h>
17 #include <ccan/read_write_all/read_write_all.h>
18 #include <ccan/antithread/alloc/alloc.h>
19 #include <ccan/list/list.h>
21 /* FIXME: Valgrind support should be possible for some cases. Tricky
22 * case is where another process allocates for you, but at worst we
23 * could reset what is valid and what isn't on every entry into the
24 * library or something. */
/* Process-global registry of every mapped pool; find_pool() walks this
 * to map an arbitrary pointer back to its owning pool. */
26 static LIST_HEAD(pools);
28 /* Talloc destroys parents before children (damn Tridge's failing destructors!)
29  * so we need the first child (ie. last-destroyed) to actually clean up. */
/* Bookkeeping for one shared-memory pool mapping.
 * NOTE(review): other fields used elsewhere in this file (p->pool, p->fd)
 * are declared on lines elided from this view. */
30 struct at_pool_contents {
	/* Membership in the global 'pools' list above. */
31 	struct list_node list;
	/* Size of the shared mapping, in bytes. */
33 	unsigned long poolsize;
	/* Pipes to our parent process; both -1 when this process is the
	 * pool creator rather than an antithread (see at_pool()). */
35 	int parent_rfd, parent_wfd;
/* NOTE(review): the member below presumably belongs to the 'struct at_pool'
 * wrapper (dereferenced throughout as atp->p); the wrapper's declaration is
 * on lines elided from this view — confirm against the full file. */
40 	struct at_pool_contents *p;
49 /* FIXME: Better locking through futexes. */
/* Take a blocking (F_SETLKW) fcntl write-lock on the pool file at byte
 * offset 'off'.  Offsets double as per-object lock identities (see
 * at_lock()).  NOTE(review): the flock setup and loop body are elided
 * here — presumably the loop retries on EINTR and err()s otherwise. */
50 static void lock(int fd, unsigned long off)
55 	fl.l_whence = SEEK_SET;
59 	while (fcntl(fd, F_SETLKW, &fl) < 0) {
61 		err(1, "Failure locking antithread file");
/* Drop the fcntl lock taken by lock() at byte offset 'off'.  The
 * F_SETLK return value is deliberately ignored: unlock is best-effort
 * (often called on teardown paths where erroring out would be worse). */
65 static void unlock(int fd, unsigned long off)
71 	fl.l_whence = SEEK_SET;
75 	fcntl(fd, F_SETLK, &fl);
79 /* This pointer is in a pool. Find which one. */
/* Linear scan of the global 'pools' list; returns the pool whose
 * [pool, pool+poolsize) range contains 'ptr'.  NOTE(review): the
 * not-found case (fall off the loop) is on lines elided here. */
80 static struct at_pool_contents *find_pool(const void *ptr)
82 	struct at_pool_contents *p;
84 	list_for_each(&pools, p, list) {
85 		/* Special case for initial allocation: ptr *is* pool */
		/* Range check via char* arithmetic to stay within the
		 * mapped region. */
89 		if ((char *)ptr >= (char *)p->pool
90 		    && (char *)ptr < (char *)p->pool + p->poolsize)
/* talloc destructor for a pool: unmap the shared region and close the
 * parent pipes.  Registered via talloc_set_destructor() in at_pool()
 * and at_get_pool(). */
96 static int destroy_pool(struct at_pool_contents *p)
99 	munmap(p->pool, p->poolsize);
101 	close(p->parent_rfd);
102 	close(p->parent_wfd);
/* talloc external-allocator hook (registered via talloc_add_external()):
 * (re)allocate 'ptr' to 'size' bytes inside the pool that 'parent'
 * lives in, using the ccan/alloc allocator over the shared mapping. */
106 static void *at_realloc(const void *parent, void *ptr, size_t size)
108 	struct at_pool_contents *p = find_pool(parent);
109 	/* FIXME: realloc in ccan/alloc? */
	/* NOTE(review): the branch condition above this free is elided —
	 * presumably the size==0 "free" case. */
113 		alloc_free(p->pool, p->poolsize, ptr);
115 	} else if (ptr == NULL) {
116 		/* FIXME: Alignment */
		/* Fresh allocation, hard-coded 16-byte alignment. */
117 		new = alloc_get(p->pool, p->poolsize, size, 16);
		/* Shrink (or same size): current block already suffices. */
119 		if (size <= alloc_size(p->pool, p->poolsize, ptr))
		/* Grow: allocate new block, copy old contents, free old. */
122 		new = alloc_get(p->pool, p->poolsize, size, 16);
125 			       alloc_size(p->pool, p->poolsize, ptr));
126 		alloc_free(p->pool, p->poolsize, ptr);
/* The pool currently held by talloc_lock(); talloc_unlock() releases it.
 * One slot only: talloc's lock/unlock hooks are not nested. */
134 static struct at_pool_contents *locked;
/* talloc lock hook: find the pool containing 'ptr' and lock it (the
 * locking lines themselves are elided in this view). */
135 static void talloc_lock(const void *ptr)
137 	struct at_pool_contents *p = find_pool(ptr);
/* talloc unlock hook: release the pool recorded in 'locked' by the
 * matching talloc_lock() call. */
144 static void talloc_unlock(void)
146 	struct at_pool_contents *p = locked;
152 /* We add 16MB to size. This compensates for address randomization. */
/* The pool must map at the same address in children (see at_get_pool());
 * over-mapping by PADDING then keeping the middle gives ASLR some slack. */
153 #define PADDING (16 * 1024 * 1024)
155 /* Create a new sharable pool. */
/* Returns a talloc'd at_pool of at least 'size' usable bytes, backed by
 * a file mapping shared (MAP_SHARED) with future antithreads.
 * NOTE(review): error-unwind paths and the fd setup are elided here. */
156 struct at_pool *at_pool(unsigned long size)
160 	struct at_pool_contents *p;
163 	/* FIXME: How much should we actually add for overhead?. */
	/* Reserve extra pages for allocator metadata. */
164 	size += 32 * getpagesize();
166 	/* Round up to whole pages. */
167 	size = (size + getpagesize()-1) & ~(getpagesize()-1);
	/* Backing file must cover pool plus the ASLR padding. */
179 	if (ftruncate(fd, size + PADDING) != 0)
182 	atp = talloc(NULL, struct at_pool);
	/* Contents kept as a separate talloc node so destroy_pool() runs
	 * last (see comment on struct at_pool_contents). */
186 	atp->p = p = talloc(NULL, struct at_pool_contents);
190 	/* First map gets a nice big area. */
191 	p->pool = mmap(NULL, size+PADDING, PROT_READ|PROT_WRITE, MAP_SHARED, fd,
193 	if (p->pool == MAP_FAILED)
196 	/* Then we remap into the middle of it. */
	/* Keeping the middle leaves PADDING/2 slack either side so a child
	 * can re-map at the same address despite randomization. */
197 	munmap(p->pool, size+PADDING);
198 	p->pool = mmap((char *)p->pool + PADDING/2, size, PROT_READ|PROT_WRITE,
200 	if (p->pool == MAP_FAILED)
	/* We are the pool creator, not an antithread: no parent pipes. */
205 	p->parent_rfd = p->parent_wfd = -1;
207 	alloc_init(p->pool, p->poolsize);
208 	list_add(&pools, &p->list);
209 	talloc_set_destructor(p, destroy_pool);
	/* Hook talloc allocation/locking into the shared pool. */
211 	atp->ctx = talloc_add_external(atp,
212 				       at_realloc, talloc_lock, talloc_unlock);
224 /* Talloc off this to allocate from within the pool. */
/* Accessor returning the external-allocator context created by
 * talloc_add_external(); body elided in this view. */
225 const void *at_pool_ctx(struct at_pool *atp)
/* talloc destructor installed on the child's own athread handle: a
 * child must not kill/reap itself the way destroy_at() would. */
230 static int cant_destroy_self(struct athread *at)
232 	/* Perhaps this means we want to detach, but it doesn't really
/* talloc destructor for an athread handle held by the parent:
 * terminate the child and reap it so no zombie is left behind. */
238 static int destroy_at(struct athread *at)
240 	/* If it is already a zombie, this is harmless. */
241 	kill(at->pid, SIGTERM);
246 	/* FIXME: Should we do SIGKILL if process doesn't exit soon? */
	/* Blocking reap; err() if we somehow fail to collect the child. */
247 	if (waitpid(at->pid, NULL, 0) != at->pid)
248 		err(1, "Waiting for athread %p (pid %u)", at, at->pid);
253 /* Sets up thread and forks it. NULL on error. */
/* Common machinery for _at_run()/at_spawn(): create the parent<->child
 * pipe pairs and fork.  NOTE(review): the pipe() calls, fork() itself
 * and the branch boundaries are on lines elided from this view. */
254 static struct athread *fork_thread(struct at_pool *atp)
258 	struct at_pool_contents *pool = atp->p;
260 	/* You can't already be a child of this pool. */
261 	if (pool->parent_rfd != -1)
262 		errx(1, "Can't create antithread on this pool: we're one");
264 	/* We don't want this allocated *in* the pool. */
265 	at = talloc_steal(atp, talloc(NULL, struct athread));
	/* Child side: remember the pipe ends leading back to the parent,
	 * and make sure the child cannot destroy its own handle. */
281 		pool->parent_rfd = p2c[0];
282 		pool->parent_wfd = c2p[1];
283 		talloc_set_destructor(at, cant_destroy_self);
	/* Parent side: freeing the handle kills and reaps the child. */
290 	talloc_set_destructor(at, destroy_at);
305 /* Creating an antithread via fork() */
/* Fork an antithread that runs fn(atp, obj) and reports its return
 * value to the parent via at_tell_parent().  The parent-side return
 * path is on lines elided from this view. */
306 struct athread *_at_run(struct at_pool *atp,
307 			void *(*fn)(struct at_pool *, void *),
312 	at = fork_thread(atp);
	/* Child: run the payload, send its result up, then (elided)
	 * exit. */
318 	at_tell_parent(atp, fn(atp, obj));
/* Count entries in a NULL-terminated argv-style vector. */
325 static unsigned int num_args(char *const argv[])
329 	for (i = 0; argv[i]; i++);
333 /* Fork and execvp, with added arguments for child to grab. */
/* Spawn an antithread running a separate program: fork via
 * fork_thread(), then exec 'cmdline' with an extra "AT:..." argv[1]
 * encoding the pool geometry and fds, which the child decodes with
 * at_get_pool().  Returns NULL on error (error paths partly elided). */
334 struct athread *at_spawn(struct at_pool *atp, void *arg, char *cmdline[])
339 	at = fork_thread(atp);
	/* Child branch: build argv = { cmd, "AT:...", rest..., NULL }. */
345 		char *argv[num_args(cmdline) + 2];
346 		argv[0] = cmdline[0];
		/* Everything the child needs to re-attach: mapping address
		 * and size, pool fd, both parent pipe fds, opaque arg. */
347 		argv[1] = talloc_asprintf(NULL, "AT:%p/%lu/%i/%i/%i/%p",
348 					  atp->p->pool, atp->p->poolsize,
349 					  atp->p->fd, atp->p->parent_rfd,
350 					  atp->p->parent_wfd, arg);
351 		/* Copy including NULL terminator. */
352 		memcpy(&argv[2], &cmdline[1], num_args(cmdline)*sizeof(char *));
353 		execvp(argv[0], argv);
		/* exec failed: report errno to the parent over the pipe. */
356 		write_all(atp->p->parent_wfd, &err, sizeof(err));
360 	/* Child should always write an error code (or 0). */
361 	if (read(at->rfd, &err, sizeof(err)) != sizeof(err)) {
374 /* The fd to poll on */
/* Accessor: the parent's read fd for this athread (body elided). */
375 int at_fd(struct athread *at)
380 /* What's the antithread saying? Blocks if fd not ready. */
/* Read one pointer-sized message from the child (sent by
 * at_tell_parent()).  The success and EOF cases of the switch are on
 * lines elided from this view. */
381 void *at_read(struct athread *at)
385 	switch (read(at->rfd, &ret, sizeof(ret))) {
	/* read() error (elided case label presumably -1). */
387 		err(1, "Reading from athread %p (pid %u)", at, at->pid);
394 	/* Should never happen. */
	/* Partial read of a pointer-sized message: fatal. */
395 	err(1, "Short read from athread %p (pid %u)", at, at->pid);
399 /* Say something to a child. */
/* Send one pointer-sized message to the child; the pointer must be
 * meaningful in the shared pool (or be an out-of-band token). */
400 void at_tell(struct athread *at, const void *status)
402 	if (write(at->wfd, &status, sizeof(status)) != sizeof(status))
403 		err(1, "Failure writing to athread %p (pid %u)", at, at->pid);
406 /* For child to grab arguments from command line (removes them) */
/* Child-side counterpart of at_spawn(): parse the "AT:..." argv[1],
 * re-map the pool at the SAME address as in the parent (required so
 * shared pointers stay valid), register the pool, ack the parent, and
 * strip the extra argument from argc/argv.  Returns NULL on failure
 * (several error-path lines are elided from this view). */
407 struct at_pool *at_get_pool(int *argc, char *argv[], void **arg)
409 	struct at_pool *atp = talloc(NULL, struct at_pool);
410 	struct at_pool_contents *p;
419 	/* If they don't care, use dummy value. */
423 	p = atp->p = talloc(atp, struct at_pool_contents);
	/* Must decode all six fields written by at_spawn(). */
425 	if (sscanf(argv[1], "AT:%p/%lu/%i/%i/%i/%p",
426 		   &p->pool, &p->poolsize, &p->fd,
427 		   &p->parent_rfd, &p->parent_wfd, arg) != 6) {
432 	/* FIXME: To try to adjust for address space randomization, we
433 	 * could re-exec a few times. */
	/* Map at the parent's address; anything else makes cross-process
	 * pointers into the pool invalid, so treat it as failure. */
434 	map = mmap(p->pool, p->poolsize, PROT_READ|PROT_WRITE, MAP_SHARED,
436 	if (map != p->pool) {
437 		fprintf(stderr, "Mapping %lu bytes @%p gave %p\n",
438 			p->poolsize, p->pool, map);
443 	list_add(&pools, &p->list);
444 	talloc_set_destructor(p, destroy_pool);
447 	atp->ctx = talloc_add_external(atp,
448 				       at_realloc, talloc_lock, talloc_unlock);
452 	/* Tell parent we're good. */
454 	if (write(p->parent_wfd, &err, sizeof(err)) != sizeof(err)) {
	/* Remove the "AT:..." argument so the program sees its normal
	 * command line.  NOTE(review): memmove length is --(*argc)
	 * elements, not bytes — presumably fixed up on an elided line or
	 * a latent bug; verify against the full file. */
460 	memmove(&argv[1], &argv[2], --(*argc));
469 /* Say something to our parent (async). */
/* Send one pointer-sized message up the parent pipe; only valid in a
 * process created as an antithread of this pool. */
470 void at_tell_parent(struct at_pool *atp, const void *status)
472 	if (atp->p->parent_wfd == -1)
473 		errx(1, "This process is not an antithread of this pool");
475 	if (write(atp->p->parent_wfd, &status, sizeof(status))!=sizeof(status))
476 		err(1, "Failure writing to parent");
479 /* What's the parent saying? Blocks if fd not ready. */
/* Read one pointer-sized message from the parent (sent by at_tell()).
 * Success/EOF switch cases are on lines elided from this view. */
480 void *at_read_parent(struct at_pool *atp)
484 	if (atp->p->parent_rfd == -1)
485 		errx(1, "This process is not an antithread of this pool");
487 	switch (read(atp->p->parent_rfd, &ret, sizeof(ret))) {
	/* read() error (elided case label presumably -1). */
489 		err(1, "Reading from parent");
496 	/* Should never happen. */
	/* Partial read of a pointer-sized message: fatal. */
497 	err(1, "Short read from parent");
501 /* The fd to poll on */
/* Accessor: the read fd from our parent; fatal if this process is not
 * an antithread. */
502 int at_parent_fd(struct at_pool *atp)
504 	if (atp->p->parent_rfd == -1)
505 		errx(1, "This process is not an antithread of this pool");
507 	return atp->p->parent_rfd;
510 /* FIXME: Futexme. */
/* Lock a pool object across processes: the object's byte offset inside
 * the pool doubles as the fcntl lock offset, so all processes agree on
 * which lock it is. */
511 void at_lock(void *obj)
513 	struct at_pool *atp = talloc_find_parent_bytype(obj, struct at_pool);
517 	/* This isn't required yet, but ensures it's a talloc ptr */
518 	l = talloc_lock_ptr(obj);
521 	lock(atp->p->fd, (char *)obj - (char *)atp->p->pool);
	/* Double-lock detection (condition elided): a stale lock usually
	 * means a previous holder died. */
525 		errx(1, "Object %p was already locked (something died?)", obj);
/* Release a per-object lock taken by at_lock(); same offset-as-lock-id
 * scheme. */
530 void at_unlock(void *obj)
532 	struct at_pool *atp = talloc_find_parent_bytype(obj, struct at_pool);
536 	l = talloc_lock_ptr(obj);
	/* Unlock-without-lock detection (condition elided). */
538 		errx(1, "Object %p was already unlocked", obj);
541 	unlock(atp->p->fd, (char *)obj - (char *)atp->p->pool);
/* Lock the whole pool.  NOTE(review): body elided from this view —
 * presumably lock(atp->p->fd, 0), mirroring at_unlock_all(); confirm
 * against the full file. */
544 void at_lock_all(struct at_pool *atp)
/* Release the whole-pool lock (offset 0). */
549 void at_unlock_all(struct at_pool *atp)
551 	unlock(atp->p->fd, 0);