1 /* Licensed under LGPLv2.1+ - see LICENSE file for details */
8 #include <sys/socket.h>
/* Global state for the poll() backend.
 * num_fds: slots in use in the parallel arrays below; max_fds: allocated
 * capacity; num_closing/num_waiting: bookkeeping counters — presumably
 * maintained by the plan-change paths elsewhere in this file (confirm). */
12 static size_t num_fds = 0, max_fds = 0, num_closing = 0, num_waiting = 0;
/* Set when some conn has an "always" (POLLALWAYS) plan; see handle_always(). */
13 static bool some_always = false;
/* Parallel arrays: pollfds[i] is the poll(2) entry for fds[i]. */
14 static struct pollfd *pollfds = NULL;
15 static struct fd **fds = NULL;
/* Pending per-connection timeouts (see backend_add_timeout/del_timeout). */
16 static struct timers timeouts;
/* Nesting depth of io_loop; used to defer frees until final exit. */
18 static unsigned int io_loop_level;
/* Conns whose free is deferred; singly linked through finish_arg
 * (see free_conn/io_loop_exit). */
19 static struct io_conn *free_later;
/* Track io_loop nesting (debug build — NOTE(review): appears to be one arm
 * of a conditional compilation; a second set of definitions follows below). */
20 static void io_loop_enter(void)
24 static void io_loop_exit(void)
/* On exit from the outermost loop, drain the free_later list; the chain
 * is threaded through each conn's finish_arg field. */
27 if (io_loop_level == 0) {
30 struct io_conn *c = free_later;
31 free_later = c->finish_arg;
/* Free a conn, or defer the free if we're inside a nested io_loop
 * (so debug re-entry can't see a dangling conn). */
36 static void free_conn(struct io_conn *conn)
38 /* Only free on final exit: chain via finish. */
39 if (io_loop_level > 1) {
/* Walk the deferred list — presumably to assert conn isn't already
 * chained; loop body not visible here (confirm). */
41 for (c = free_later; c; c = c->finish_arg)
/* Push onto free_later, reusing finish_arg as the link. */
43 conn->finish_arg = free_later;
/* Non-debug variants — NOTE(review): these duplicate the names above, so
 * this is presumably the other arm of an #if/#else; bodies not visible.
 * Likely no-op enter/exit and an immediate free — confirm. */
49 static void io_loop_enter(void)
52 static void io_loop_exit(void)
55 static void free_conn(struct io_conn *conn)
/* Register an fd in the parallel pollfds[]/fds[] arrays with the given
 * poll events.  Returns false on allocation failure (presumably — return
 * paths not all visible; confirm). */
61 static bool add_fd(struct fd *fd, short events)
/* Grow both arrays together when full: double capacity, starting at 8. */
63 if (num_fds + 1 > max_fds) {
64 struct pollfd *newpollfds;
66 size_t num = max_fds ? max_fds * 2 : 8;
68 newpollfds = io_alloc.realloc(pollfds, sizeof(*newpollfds)*num);
72 newfds = io_alloc.realloc(fds, sizeof(*newfds) * num);
79 pollfds[num_fds].events = events;
80 /* In case it's idle. */
/* Negative fd makes poll(2) skip this entry (idle connection). */
82 pollfds[num_fds].fd = -fd->fd;
84 pollfds[num_fds].fd = fd->fd;
85 pollfds[num_fds].revents = 0; /* In case we're iterating now */
/* Record our slot index so del_fd/backend_plan_changed can find us. */
87 fd->backend_info = num_fds;
/* Remove an fd from the backend arrays, keeping them dense by moving the
 * last entry into the vacated slot. */
95 static void del_fd(struct fd *fd)
97 size_t n = fd->backend_info;
/* Entry was being polled — presumably adjust num_waiting here (the
 * affected statement is not visible; confirm). */
101 if (pollfds[n].events)
103 if (n != num_fds - 1) {
104 /* Move last one over us. */
105 pollfds[n] = pollfds[num_fds-1];
106 fds[n] = fds[num_fds-1];
107 assert(fds[n]->backend_info == num_fds-1);
/* Fix the moved entry's slot index. */
108 fds[n]->backend_info = n;
109 /* If that happens to be a duplex, move that too. */
110 if (!fds[n]->listener) {
/* Non-listener entries are io_conns (struct fd is the first member). */
111 struct io_conn *c = (void *)fds[n];
/* A duplex pair shares one pollfd slot, so its index moves too. */
113 assert(c->duplex->fd.backend_info == num_fds-1);
114 c->duplex->fd.backend_info = n;
117 } else if (num_fds == 1) {
118 /* Free everything when no more fds. */
119 io_alloc.free(pollfds);
/* Mark as no longer registered (checked in backend_plan_changed). */
126 fd->backend_info = -1;
/* Register a listening socket; listeners only ever poll for POLLIN
 * (incoming connections).  Returns false if add_fd fails. */
130 bool add_listener(struct io_listener *l)
132 if (!add_fd(&l->fd, POLLIN))
/* Resynchronize a conn's pollfd entry after its io_plan changed:
 * update events, merge in the duplex partner's interest, and flip the
 * fd sign to enable/disable polling. */
137 void backend_plan_changed(struct io_conn *conn)
141 /* This can happen with debugging and delayed free... */
142 if (conn->fd.backend_info == -1)
145 pfd = &pollfds[conn->fd.backend_info];
/* Only POLLIN/POLLOUT are real poll flags; other pollflag values
 * (e.g. POLLALWAYS) are internal markers. */
150 pfd->events = conn->plan.pollflag & (POLLIN|POLLOUT);
152 int mask = conn->duplex->plan.pollflag & (POLLIN|POLLOUT);
153 /* You can't *both* read/write. */
/* The two halves of a duplex must not want the same direction. */
154 assert(!mask || pfd->events != mask);
/* Positive fd: poll it; negative fd: poll(2) ignores the entry. */
159 pfd->fd = conn->fd.fd;
161 pfd->fd = -conn->fd.fd;
/* No next plan => conn is closing — presumably bumps num_closing;
 * the affected statement is not visible (confirm). */
163 if (!conn->plan.next)
/* Flag "always" plans so handle_always() scans for them. */
166 if (conn->plan.pollflag == POLLALWAYS)
/* Wake every conn whose plan is waiting on `wait`: convert its plan to an
 * "always" plan so the loop runs its next callback. */
170 void backend_wait_changed(const void *wait)
174 for (i = 0; i < num_fds; i++) {
175 struct io_conn *c, *duplex;
177 /* Ignore listeners */
178 if (fds[i]->listener)
/* Visit the conn and then its duplex partner (if any) exactly once. */
181 for (duplex = c->duplex; c; c = duplex, duplex = NULL) {
182 /* Ignore closing. */
188 /* Waiting on something else? */
/* u1.const_vp holds the wait object the plan is parked on. */
189 if (c->plan.u1.const_vp != wait)
191 /* Make it do the next thing. */
192 c->plan = io_always_(c->plan.next, c->plan.next_arg);
193 backend_plan_changed(c);
/* Register a new connection with the backend; its initial poll interest
 * comes from its current plan.  Returns false if add_fd fails. */
198 bool add_conn(struct io_conn *c)
200 if (!add_fd(&c->fd, c->plan.pollflag & (POLLIN|POLLOUT)))
202 /* Immediate close is allowed. */
/* New conn may start with an "always" plan; flag it for handle_always. */
205 if (c->plan.pollflag == POLLALWAYS)
/* Register the second half of a duplex pair: it shares its partner's
 * pollfd slot rather than getting one via add_fd. */
210 bool add_duplex(struct io_conn *c)
212 c->fd.backend_info = c->duplex->fd.backend_info;
/* Merge this half's poll interest into the shared entry. */
213 backend_plan_changed(c);
/* Tear down a conn: cancel its timeout, detach it from its duplex partner
 * (or remove its fd slot), and run the user's finish callback. */
217 void backend_del_conn(struct io_conn *conn)
219 if (timeout_active(conn))
220 backend_del_timeout(conn);
/* free(NULL) is fine if no timeout was ever allocated. */
221 io_alloc.free(conn->timeout);
223 /* In case fds[] pointed to the other one. */
/* Duplex halves share one slot; leave the slot owned by the survivor. */
224 assert(conn->duplex->fd.backend_info == conn->fd.backend_info);
225 fds[conn->fd.backend_info] = &conn->duplex->fd;
226 conn->duplex->duplex = NULL;
227 conn->fd.backend_info = -1;
232 /* Saved by io_close */
/* Restore the errno captured when the close was decided, so the finish
 * callback sees the original failure cause. */
233 errno = conn->plan.u1.s;
234 conn->finish(conn, conn->finish_arg);
/* Unregister a listener — presumably del_fd on l->fd; body not visible. */
239 void del_listener(struct io_listener *l)
/* Install a new plan on a conn and tell the backend its poll interest
 * may have changed. */
244 static void set_plan(struct io_conn *conn, struct io_plan plan)
247 backend_plan_changed(conn);
/* Accept one pending connection on a listener and hand it to the user's
 * callback (callback invocation not visible here). */
250 static void accept_conn(struct io_listener *l)
/* We don't care about the peer address, hence NULL addr/addrlen. */
252 int fd = accept(l->fd.fd, NULL, NULL);
254 /* FIXME: What to do here? */
260 /* It's OK to miss some, as long as we make progress. */
/* Reap closed conns; returns true (presumably) when a debug conn in
 * `ready` finished and the caller should return early — confirm. */
261 static bool finish_conns(struct io_conn **ready)
/* Stop early if a callback set io_loop_return. */
265 for (i = 0; !io_loop_return && i < num_fds; i++) {
266 struct io_conn *c, *duplex;
271 if (fds[i]->listener)
/* Visit the conn then its duplex partner exactly once. */
274 for (duplex = c->duplex; c; c = duplex, duplex = NULL) {
276 if (doing_debug_on(c) && ready) {
/* Arm (or re-arm) a conn's timeout to fire `duration` from now. */
288 void backend_add_timeout(struct io_conn *conn, struct timerel duration)
/* Lazy-init of the global timer wheel on first use (guard not visible). */
291 timers_init(&timeouts, time_now());
292 timer_add(&timeouts, &conn->timeout->timer,
293 timeabs_add(time_now(), duration));
/* Non-NULL conn marks the timeout as active (see timeout_active users). */
294 conn->timeout->conn = conn;
/* Disarm a conn's pending timeout. */
297 void backend_del_timeout(struct io_conn *conn)
299 assert(conn->timeout->conn == conn);
300 timer_del(&timeouts, &conn->timeout->timer);
/* NULL conn marks the timeout as inactive. */
301 conn->timeout->conn = NULL;
/* Run the callbacks of every conn (and duplex partner) whose plan is
 * POLLALWAYS, i.e. ready without any fd event. */
304 static void handle_always(void)
/* Stop early if a callback set io_loop_return. */
310 for (i = 0; i < num_fds && !io_loop_return; i++) {
311 struct io_conn *c = (void *)fds[i];
313 if (fds[i]->listener)
316 if (c->plan.pollflag == POLLALWAYS)
/* The duplex partner may be "always" independently. */
319 if (c->duplex && c->duplex->plan.pollflag == POLLALWAYS)
324 /* This is the main loop. */
/* Core event loop: expire timers, compute the poll timeout, poll(2),
 * dispatch fd events (including duplex pairs and error conditions), and
 * reap closing conns — until something sets io_loop_return.
 * `ready` is the debug hook: non-NULL restricts/flags which conn we are
 * single-stepping (presumably — debug paths only partly visible). */
325 void *do_io_loop(struct io_conn **ready)
331 while (!io_loop_return) {
332 int i, r, timeout = INT_MAX;
334 bool some_timeouts = false;
337 struct timeabs first;
338 struct list_head expired;
339 struct io_timeout *t;
343 /* Call functions for expired timers. */
344 timers_expire(&timeouts, now, &expired);
345 while ((t = list_pop(&expired, struct io_timeout, timer.list))) {
346 struct io_conn *conn = t->conn;
347 /* Clear, in case timer re-adds */
/* Timer callback supplies the conn's next plan. */
350 set_plan(conn, t->next(conn, t->next_arg));
351 some_timeouts = true;
354 /* Now figure out how long to wait for the next one. */
355 if (timer_earliest(&timeouts, &first)) {
/* Delta to next timer, in ms, for the poll() timeout below. */
356 uint64_t f = time_to_msec(time_between(first, now));
363 /* If this finishes a debugging con, return now. */
364 if (finish_conns(ready))
366 /* Could have started/finished more. */
370 /* debug can recurse on io_loop; anything can change. */
371 if (doing_debug() && some_timeouts)
382 /* You can't tell them all to go to sleep! */
/* NOTE(review): poll's error return (r < 0) is presumably handled in
 * an elided line — confirm EINTR is tolerated. */
385 r = poll(pollfds, num_fds, timeout);
389 for (i = 0; i < num_fds && !io_loop_return; i++) {
390 struct io_conn *c = (void *)fds[i];
391 int events = pollfds[i].revents;
396 if (fds[i]->listener) {
397 if (events & POLLIN) {
398 accept_conn((void *)c);
/* Readable/writable: dispatch to the right half of a duplex pair. */
401 } else if (events & (POLLIN|POLLOUT)) {
404 int mask = c->duplex->plan.pollflag;
406 if (doing_debug_on(c->duplex)
413 /* debug can recurse;
414 * anything can change. */
418 /* If no events, or it closed
419 * the duplex, continue. */
420 if (!(events&(POLLIN|POLLOUT))
425 if (doing_debug_on(c) && ready) {
430 /* debug can recurse; anything can change. */
/* Hangup/error/invalid fd: force both halves onto a close plan. */
433 } else if (events & (POLLHUP|POLLNVAL|POLLERR)) {
437 set_plan(c, io_close());
439 set_current(c->duplex);
440 set_plan(c->duplex, io_close());
/* Drain all closing conns before deciding whether to loop again. */
446 while (num_closing && !io_loop_return) {
447 if (finish_conns(ready))
/* Consume the loop's return value and reset for the next io_loop. */
451 ret = io_loop_return;
452 io_loop_return = NULL;
/* Public entry point: run the loop with no debug conn selected. */
460 return do_io_loop(NULL);