/* GNU LGPL version 2 (or later) - see LICENSE file for details */
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <inttypes.h>
#include <stdlib.h>

#include <unistd.h>
#include <sys/mman.h>

#include <ccan/ptrint/ptrint.h>
#include <ccan/compiler/compiler.h>
#include <ccan/build_assert/build_assert.h>
#include <ccan/coroutine/coroutine.h>

/*
 * Stack management
 */
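/*
 * Layout note: the struct coroutine_stack control block, plus any caller
 * metadata, sits at the end of the buffer that the stack grows away from;
 * the remaining space is the usable stack, so an overrun runs off the far
 * end of the buffer rather than into the control block.
 */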

/* Returns lowest stack address, regardless of growth direction */
static UNNEEDED void *coroutine_stack_base(struct coroutine_stack *stack)
{
#if HAVE_STACK_GROWS_UPWARDS
        return (char *)(stack + 1);
#else
        return (char *)stack - stack->size;
#endif
}

#if HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
static void vg_register_stack(struct coroutine_stack *stack)
{
        char *base = coroutine_stack_base(stack);

        VALGRIND_MAKE_MEM_UNDEFINED(base, stack->size);
        stack->valgrind_id = VALGRIND_STACK_REGISTER(base,
                                                     base + stack->size - 1);
}

static void vg_deregister_stack(struct coroutine_stack *stack)
{
        VALGRIND_MAKE_MEM_UNDEFINED(coroutine_stack_base(stack), stack->size);
        VALGRIND_STACK_DEREGISTER(stack->valgrind_id);
}

static bool vg_addressable(void *p, size_t len)
{
        return !VALGRIND_CHECK_MEM_IS_ADDRESSABLE(p, len);
}
#else
#define vg_register_stack(stack)                do { } while (0)
#define vg_deregister_stack(stack)              do { } while (0)
#define vg_addressable(p, len)                  (true)
#endif
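/*
 * Without <valgrind/memcheck.h>, the hooks above compile to nothing:
 * stack (de)registration is a no-op and vg_addressable() treats every
 * range as addressable.
 */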

struct coroutine_stack *coroutine_stack_init(void *buf, size_t bufsize,
                                             size_t metasize)
{
        struct coroutine_stack *stack;
        size_t size = bufsize - sizeof(*stack) - metasize;

#ifdef MINSIGSTKSZ
        BUILD_ASSERT(COROUTINE_MIN_STKSZ >= MINSIGSTKSZ);
#endif

        if (bufsize < (COROUTINE_MIN_STKSZ + sizeof(*stack) + metasize))
                return NULL;

#if HAVE_STACK_GROWS_UPWARDS
        stack = (struct coroutine_stack *)((char *)buf + metasize);
#else
        stack = (struct coroutine_stack *)
                ((char *)buf + bufsize - metasize) - 1;
#endif

        stack->magic = COROUTINE_STACK_MAGIC_BUF;
        stack->size = size;
        vg_register_stack(stack);
        return stack;
}
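/*
 * Usage sketch (illustrative only): carving a stack out of a
 * caller-supplied buffer, with no per-coroutine metadata (metasize 0).
 *
 *        static char buf[32 * 1024];
 *        struct coroutine_stack *stack;
 *
 *        stack = coroutine_stack_init(buf, sizeof(buf), 0);
 *        if (!stack)
 *                ... buffer too small for COROUTINE_MIN_STKSZ plus overhead ...
 */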

struct coroutine_stack *coroutine_stack_alloc(size_t totalsize, size_t metasize)
{
        struct coroutine_stack *stack;
        size_t pgsz = getpagesize();
        size_t mapsize;
        char *map, *guard;
        int rc;

        /* Round the requested size up to whole pages, plus one extra
         * page to serve as a guard at the stack's growth end. */
        mapsize = ((totalsize + (pgsz - 1)) & ~(pgsz - 1)) + pgsz;

        map = mmap(NULL, mapsize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (map == MAP_FAILED)
                return NULL;

#if HAVE_STACK_GROWS_UPWARDS
        guard = map + mapsize - pgsz;
        stack = (struct coroutine_stack *)(guard - totalsize + metasize);
#else
        guard = map;
        stack = (struct coroutine_stack *)(map + pgsz + totalsize - metasize)
                - 1;
#endif

        /* Make the guard page inaccessible so a stack overrun faults
         * instead of silently corrupting adjacent memory. */
        rc = mprotect(guard, pgsz, PROT_NONE);
        if (rc != 0) {
                munmap(map, mapsize);
                return NULL;
        }

        stack->magic = COROUTINE_STACK_MAGIC_ALLOC;
        stack->size = totalsize - sizeof(*stack) - metasize;

        vg_register_stack(stack);

        return stack;
}
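/*
 * Usage sketch (illustrative only): a heap-allocated stack must be
 * released with coroutine_stack_release(), using the same metasize.
 *
 *        struct coroutine_stack *stack;
 *
 *        stack = coroutine_stack_alloc(64 * 1024, 0);
 *        if (!stack)
 *                ... mmap() or mprotect() failed ...
 *        ...
 *        coroutine_stack_release(stack, 0);
 */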

/* Undo the layout arithmetic of coroutine_stack_alloc() to recover the
 * original mmap() base and length (guard page included), then unmap the
 * whole region. */
static void coroutine_stack_free(struct coroutine_stack *stack, size_t metasize)
{
        void *map;
        size_t pgsz = getpagesize();
        size_t totalsize = stack->size + sizeof(*stack) + metasize;
        size_t mapsize = ((totalsize + (pgsz - 1)) & ~(pgsz - 1)) + pgsz;

#if HAVE_STACK_GROWS_UPWARDS
        map = (char *)(stack + 1) + stack->size + pgsz - mapsize;
#else
        map = (char *)stack - stack->size - pgsz;
#endif

        munmap(map, mapsize);
}

void coroutine_stack_release(struct coroutine_stack *stack, size_t metasize)
{
        vg_deregister_stack(stack);

        switch (stack->magic) {
        case COROUTINE_STACK_MAGIC_BUF:
                memset(stack, 0, sizeof(*stack));
                break;

        case COROUTINE_STACK_MAGIC_ALLOC:
                coroutine_stack_free(stack, metasize);
                break;

        default:
                abort();
        }
}

struct coroutine_stack *coroutine_stack_check(struct coroutine_stack *stack,
                                              const char *abortstr)
{
        if (stack && vg_addressable(stack, sizeof(*stack))
            && ((stack->magic == COROUTINE_STACK_MAGIC_BUF)
                || (stack->magic == COROUTINE_STACK_MAGIC_ALLOC))
            && (stack->size >= COROUTINE_MIN_STKSZ))
                return stack;

        if (abortstr) {
                if (!stack)
                        fprintf(stderr, "%s: NULL coroutine stack\n", abortstr);
                else
                        fprintf(stderr,
                                "%s: Bad coroutine stack at %p (magic=0x%"PRIx64" size=%zu)\n",
                                abortstr, stack, stack->magic, stack->size);
                abort();
        }
        return NULL;
}

size_t coroutine_stack_size(const struct coroutine_stack *stack)
{
        return stack->size;
}

#if HAVE_UCONTEXT
static void coroutine_uc_stack(stack_t *uc_stack,
                               const struct coroutine_stack *stack)
{
        uc_stack->ss_size = coroutine_stack_size(stack);
        uc_stack->ss_sp = coroutine_stack_base((struct coroutine_stack *)stack);
}
#endif /* HAVE_UCONTEXT */

/*
 * Coroutine switching
 */

#if HAVE_UCONTEXT
void coroutine_init_(struct coroutine_state *cs,
                     void (*fn)(void *), void *arg,
                     struct coroutine_stack *stack)
{
        getcontext(&cs->uc);

        coroutine_uc_stack(&cs->uc.uc_stack, stack);

        if (HAVE_POINTER_SAFE_MAKECONTEXT) {
                makecontext(&cs->uc, (void *)fn, 1, arg);
        } else {
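                /* makecontext() is only guaranteed to pass int-sized
                 * arguments, so split the pointer into low and high
                 * halves; the function started by makecontext() is
                 * expected to recombine them into the original pointer. */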
                ptrdiff_t si = ptr2int(arg);
                ptrdiff_t mask = (1UL << (sizeof(int) * 8)) - 1;
                int lo = si & mask;
                int hi = si >> (sizeof(int) * 8);

                makecontext(&cs->uc, (void *)fn, 2, lo, hi);
        }
}

void coroutine_jump(const struct coroutine_state *to)
{
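        /* setcontext() does not return on success; reaching the assert
         * below means the jump itself failed. */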
        setcontext(&to->uc);
        assert(0);
}

void coroutine_switch(struct coroutine_state *from,
                      const struct coroutine_state *to)
{
        int rc;

        rc = swapcontext(&from->uc, &to->uc);
        assert(rc == 0);
}
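/*
 * Switching sketch (illustrative only; error handling omitted, and the
 * underscore-suffixed coroutine_init_() is called directly here, though
 * coroutine.h may provide a friendlier wrapper):
 *
 *        static struct coroutine_state caller, coro;
 *
 *        static void coro_fn(void *arg)
 *        {
 *                ...
 *                coroutine_switch(&coro, &caller);        // yield back
 *        }
 *
 *        // in the caller, with 'stack' set up as above:
 *        coroutine_init_(&coro, coro_fn, NULL, stack);
 *        coroutine_switch(&caller, &coro);                // run coro_fn
 */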
#endif /* HAVE_UCONTEXT */