/* Licensed under LGPLv2.1+ - see LICENSE file for details */
#include "io.h"
#include "backend.h"
#include <assert.h>
#include <poll.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <limits.h>
#include <errno.h>

static size_t num_fds = 0, max_fds = 0, num_closing = 0, num_waiting = 0;
static struct pollfd *pollfds = NULL;
static struct fd **fds = NULL;
static struct timers timeouts;
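
/* With DEBUG, a connection freed inside a nested (recursive) io_loop() call
 * is chained onto free_later (reusing finish_arg) and only released when the
 * outermost loop exits; without DEBUG it is freed immediately. */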
#ifdef DEBUG
static unsigned int io_loop_level;
static struct io_conn *free_later;
static void io_loop_enter(void)
{
        io_loop_level++;
}
static void io_loop_exit(void)
{
        io_loop_level--;
        if (io_loop_level == 0) {
                /* Delayed free. */
                while (free_later) {
                        struct io_conn *c = free_later;
                        free_later = c->finish_arg;
                        io_alloc.free(c);
                }
        }
}
static void free_conn(struct io_conn *conn)
{
        /* Only free on final exit: chain via finish. */
        if (io_loop_level > 1) {
                struct io_conn *c;
                for (c = free_later; c; c = c->finish_arg)
                        assert(c != conn);
                conn->finish_arg = free_later;
                free_later = conn;
        } else
                io_alloc.free(conn);
}
#else
static void io_loop_enter(void)
{
}
static void io_loop_exit(void)
{
}
static void free_conn(struct io_conn *conn)
{
        io_alloc.free(conn);
}
#endif

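/* Add an fd to the parallel pollfds[]/fds[] arrays, doubling their size
 * (starting at 8) as needed.  An fd waiting on no events is stored negated
 * so poll() ignores it. */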
static bool add_fd(struct fd *fd, short events)
{
        if (num_fds + 1 > max_fds) {
                struct pollfd *newpollfds;
                struct fd **newfds;
                size_t num = max_fds ? max_fds * 2 : 8;

                newpollfds = io_alloc.realloc(pollfds, sizeof(*newpollfds)*num);
                if (!newpollfds)
                        return false;
                pollfds = newpollfds;
                newfds = io_alloc.realloc(fds, sizeof(*newfds) * num);
                if (!newfds)
                        return false;
                fds = newfds;
                max_fds = num;
        }

        pollfds[num_fds].events = events;
        /* In case it's idle. */
        if (!events)
                pollfds[num_fds].fd = -fd->fd;
        else
                pollfds[num_fds].fd = fd->fd;
        pollfds[num_fds].revents = 0; /* In case we're iterating now */
        fds[num_fds] = fd;
        fd->backend_info = num_fds;
        num_fds++;
        if (events)
                num_waiting++;

        return true;
}

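/* Remove an fd from the arrays by moving the last entry into its slot
 * (updating that entry's backend_info, and its duplex partner's if any).
 * Once the last fd is gone, the arrays themselves are freed. */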
static void del_fd(struct fd *fd)
{
        size_t n = fd->backend_info;

        assert(n != -1);
        assert(n < num_fds);
        if (pollfds[n].events)
                num_waiting--;
        if (n != num_fds - 1) {
                /* Move last one over us. */
                pollfds[n] = pollfds[num_fds-1];
                fds[n] = fds[num_fds-1];
                assert(fds[n]->backend_info == num_fds-1);
                fds[n]->backend_info = n;
                /* If that happens to be a duplex, move that too. */
                if (!fds[n]->listener) {
                        struct io_conn *c = (void *)fds[n];
                        if (c->duplex) {
                                assert(c->duplex->fd.backend_info == num_fds-1);
                                c->duplex->fd.backend_info = n;
                        }
                }
        } else if (num_fds == 1) {
                /* Free everything when no more fds. */
                io_alloc.free(pollfds);
                io_alloc.free(fds);
                pollfds = NULL;
                fds = NULL;
                max_fds = 0;
        }
        num_fds--;
        fd->backend_info = -1;
        close(fd->fd);
}

bool add_listener(struct io_listener *l)
{
        if (!add_fd(&l->fd, POLLIN))
                return false;
        return true;
}

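/* Resynchronize the pollfd with the connection's plan: combine this side's
 * pollflag with the duplex partner's (if any), negate the fd when nothing is
 * being waited for, and count the connection as closing if its plan has no
 * next step. */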
void backend_plan_changed(struct io_conn *conn)
{
        struct pollfd *pfd;

        /* This can happen with debugging and delayed free... */
        if (conn->fd.backend_info == -1)
                return;

        pfd = &pollfds[conn->fd.backend_info];

        if (pfd->events)
                num_waiting--;

        pfd->events = conn->plan.pollflag;
        if (conn->duplex) {
                int mask = conn->duplex->plan.pollflag;
                /* You can't *both* read/write. */
                assert(!mask || pfd->events != mask);
                pfd->events |= mask;
        }
        if (pfd->events) {
                num_waiting++;
                pfd->fd = conn->fd.fd;
        } else
                pfd->fd = -conn->fd.fd;

        if (!conn->plan.next)
                num_closing++;
}

bool add_conn(struct io_conn *c)
{
        if (!add_fd(&c->fd, c->plan.pollflag))
                return false;
        /* Immediate close is allowed. */
        if (!c->plan.next)
                num_closing++;
        return true;
}

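/* A duplex connection shares its partner's pollfd slot: it inherits the slot
 * index and merges its events into it. */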
bool add_duplex(struct io_conn *c)
{
        c->fd.backend_info = c->duplex->fd.backend_info;
        backend_plan_changed(c);
        return true;
}

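/* Tear down a connection: run its finish callback (with the errno saved by
 * io_close), cancel and free any timeout, then either hand the shared pollfd
 * slot back to the duplex partner or remove the fd entirely. */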
void backend_del_conn(struct io_conn *conn)
{
        if (conn->finish) {
                /* Saved by io_close */
                errno = conn->plan.u1.s;
                conn->finish(conn, conn->finish_arg);
        }
        if (timeout_active(conn))
                backend_del_timeout(conn);
        io_alloc.free(conn->timeout);
        if (conn->duplex) {
                /* In case fds[] pointed to the other one. */
                assert(conn->duplex->fd.backend_info == conn->fd.backend_info);
                fds[conn->fd.backend_info] = &conn->duplex->fd;
                conn->duplex->duplex = NULL;
                conn->fd.backend_info = -1;
        } else
                del_fd(&conn->fd);
        num_closing--;
        free_conn(conn);
}

void del_listener(struct io_listener *l)
{
        del_fd(&l->fd);
}

static void set_plan(struct io_conn *conn, struct io_plan plan)
{
        conn->plan = plan;
        backend_plan_changed(conn);
}

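/* Accept a pending connection on a listening fd and hand the new fd to the
 * listener's init callback. */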
static void accept_conn(struct io_listener *l)
{
        int fd = accept(l->fd.fd, NULL, NULL);

        /* FIXME: What to do here? */
        if (fd < 0)
                return;
        l->init(fd, l->arg);
}

/* It's OK to miss some, as long as we make progress. */
static bool finish_conns(struct io_conn **ready)
{
        unsigned int i;

        for (i = 0; !io_loop_return && i < num_fds; i++) {
                struct io_conn *c, *duplex;

                if (!num_closing)
                        break;

                if (fds[i]->listener)
                        continue;
                c = (void *)fds[i];
                for (duplex = c->duplex; c; c = duplex, duplex = NULL) {
                        if (!c->plan.next) {
                                if (doing_debug_on(c) && ready) {
                                        *ready = c;
                                        return true;
                                }
                                backend_del_conn(c);
                                i--;
                        }
                }
        }
        return false;
}

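/* Per-connection timeouts share one timer list, initialized lazily the first
 * time a timeout is added. */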
void backend_add_timeout(struct io_conn *conn, struct timespec duration)
{
        if (!timeouts.base)
                timers_init(&timeouts, time_now());
        timer_add(&timeouts, &conn->timeout->timer,
                  time_add(time_now(), duration));
        conn->timeout->conn = conn;
}

void backend_del_timeout(struct io_conn *conn)
{
        assert(conn->timeout->conn == conn);
        timer_del(&timeouts, &conn->timeout->timer);
        conn->timeout->conn = NULL;
}

/* This is the main loop. */
void *do_io_loop(struct io_conn **ready)
{
        void *ret;

        io_loop_enter();

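        /* Each iteration: fire expired timers, reap closing connections,
         * then poll() the remaining fds and dispatch whichever are ready
         * (a duplex partner gets first go at its events). */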
        while (!io_loop_return) {
                int i, r, timeout = INT_MAX;
                struct timespec now;
                bool some_timeouts = false;

                if (timeouts.base) {
                        struct timespec first;
                        struct list_head expired;
                        struct io_timeout *t;

                        now = time_now();

                        /* Call functions for expired timers. */
                        timers_expire(&timeouts, now, &expired);
                        while ((t = list_pop(&expired, struct io_timeout, timer.list))) {
                                struct io_conn *conn = t->conn;
                                /* Clear, in case timer re-adds */
                                t->conn = NULL;
                                set_current(conn);
                                set_plan(conn, t->next(conn, t->next_arg));
                                some_timeouts = true;
                        }

                        /* Now figure out how long to wait for the next one. */
                        if (timer_earliest(&timeouts, &first)) {
                                uint64_t f = time_to_msec(time_sub(first, now));
                                if (f < INT_MAX)
                                        timeout = f;
                        }
                }

                if (num_closing) {
                        /* If this finishes a debugging con, return now. */
                        if (finish_conns(ready))
                                return NULL;
                        /* Could have started/finished more. */
                        continue;
                }

                /* debug can recurse on io_loop; anything can change. */
                if (doing_debug() && some_timeouts)
                        continue;

                if (num_fds == 0)
                        break;

                /* You can't tell them all to go to sleep! */
                assert(num_waiting);

                r = poll(pollfds, num_fds, timeout);
                if (r < 0)
                        break;

                for (i = 0; i < num_fds && !io_loop_return; i++) {
                        struct io_conn *c = (void *)fds[i];
                        int events = pollfds[i].revents;

                        if (r == 0)
                                break;

                        if (fds[i]->listener) {
                                if (events & POLLIN) {
                                        accept_conn((void *)c);
                                        r--;
                                }
                        } else if (events & (POLLIN|POLLOUT)) {
                                r--;
                                if (c->duplex) {
                                        int mask = c->duplex->plan.pollflag;
                                        if (events & mask) {
                                                if (doing_debug_on(c->duplex)
                                                        && ready) {
                                                        *ready = c->duplex;
                                                        return NULL;
                                                }
                                                io_ready(c->duplex);
                                                events &= ~mask;
                                                /* debug can recurse;
                                                 * anything can change. */
                                                if (doing_debug())
                                                        break;

                                                /* If no events, or it closed
                                                 * the duplex, continue. */
                                                if (!(events&(POLLIN|POLLOUT))
                                                    || !c->plan.next)
                                                        continue;
                                        }
                                }
                                if (doing_debug_on(c) && ready) {
                                        *ready = c;
                                        return NULL;
                                }
                                io_ready(c);
                                /* debug can recurse; anything can change. */
                                if (doing_debug())
                                        break;
                        } else if (events & (POLLHUP|POLLNVAL|POLLERR)) {
                                r--;
                                set_current(c);
                                errno = EBADF;
                                set_plan(c, io_close());
                                if (c->duplex) {
                                        set_current(c->duplex);
                                        set_plan(c->duplex, io_close());
                                }
                        }
                }
        }

        while (num_closing && !io_loop_return) {
                if (finish_conns(ready))
                        return NULL;
        }

        ret = io_loop_return;
        io_loop_return = NULL;

        io_loop_exit();
        return ret;
}

void *io_loop(void)
{
        return do_io_loop(NULL);
}