1 /* Licensed under LGPLv2.1+ - see LICENSE file for details */
8 #include <sys/socket.h>
/* Backend state for the poll(2)-based event loop.
 * pollfds[] and fds[] are parallel arrays: pollfds[i] is the poll entry
 * for the fd object fds[i].  num_fds entries are in use, max_fds are
 * allocated.  num_closing/num_waiting appear to track conns pending
 * close and fds actively polled -- NOTE(review): confirm against the
 * lines elided from this view. */
12 static size_t num_fds = 0, max_fds = 0, num_closing = 0, num_waiting = 0;
13 static struct pollfd *pollfds = NULL;
14 static struct fd **fds = NULL;
/* Active timeouts (populated by backend_add_timeout below). */
15 static struct timers timeouts;
/* Nesting depth of io_loop(); frees are deferred while re-entered. */
17 static unsigned int io_loop_level;
/* Conns queued for deferred freeing, chained through finish_arg
 * (see free_conn / io_loop_exit). */
18 static struct io_conn *free_later;
/* Track io_loop() nesting depth (debug build).
 * NOTE(review): body lines are not visible in this view; presumably
 * just increments io_loop_level. */
19 static void io_loop_enter(void)
/* On leaving the outermost loop, free every conn queued by free_conn().
 * NOTE(review): the decrement of io_loop_level and the loop construct
 * around the pop below are on lines elided from this view. */
23 static void io_loop_exit(void)
26 	if (io_loop_level == 0) {
/* Pop the head of the deferred-free chain; the chain is linked
 * through finish_arg (see free_conn). */
29 			struct io_conn *c = free_later;
30 			free_later = c->finish_arg;
/* Free a conn, or defer the free if we are inside a nested io_loop()
 * (debug build).  Deferred conns are pushed onto the free_later chain,
 * reusing finish_arg as the link pointer -- safe because finish() has
 * already been called by this point (see del_conn). */
35 static void free_conn(struct io_conn *conn)
37 	/* Only free on final exit: chain via finish. */
38 	if (io_loop_level > 1) {
/* NOTE(review): this scan presumably asserts conn is not already
 * chained; the assert line is elided from this view. */
40 		for (c = free_later; c; c = c->finish_arg)
42 		conn->finish_arg = free_later;
/* Non-debug variants: no nesting bookkeeping, free_conn() presumably
 * frees immediately.  NOTE(review): bodies are elided from this view. */
48 static void io_loop_enter(void)
51 static void io_loop_exit(void)
54 static void free_conn(struct io_conn *conn)
/* Register fd in the parallel pollfds[]/fds[] arrays, polling for
 * `events`.  Returns false on allocation failure (per the bool return;
 * the failure-return lines are elided from this view). */
60 static bool add_fd(struct fd *fd, short events)
/* Grow both arrays geometrically (x2, starting at 8) when full. */
62 	if (num_fds + 1 > max_fds) {
63 		struct pollfd *newpollfds;
65 		size_t num = max_fds ? max_fds * 2 : 8;
67 		newpollfds = realloc(pollfds, sizeof(*newpollfds) * num);
/* NOTE(review): the realloc results are presumably NULL-checked on the
 * elided lines before pollfds/fds/max_fds are updated -- confirm. */
71 		newfds = realloc(fds, sizeof(*newfds) * num);
/* Append the new entry at the tail. */
78 	pollfds[num_fds].events = events;
79 	/* In case it's idle. */
/* An idle fd is stored negated so poll(2) ignores it (negative fds
 * are skipped); see the matching logic in backend_plan_changed. */
81 		pollfds[num_fds].fd = -fd->fd;
83 		pollfds[num_fds].fd = fd->fd;
84 	pollfds[num_fds].revents = 0; /* In case we're iterating now */
/* Remember our index so del_fd/backend_plan_changed can find us. */
86 	fd->backend_info = num_fds;
/* Unregister fd from the backend arrays.  Uses the classic
 * swap-with-last trick so removal is O(1) and the arrays stay dense. */
96 static void del_fd(struct fd *fd)
98 	size_t n = fd->backend_info;
/* NOTE(review): the bounds assert and the num_waiting decrement that
 * this guard presumably feeds are on lines elided from this view. */
100 	if (pollfds[n].events)
102 	if (n != num_fds - 1) {
103 		/* Move last one over us. */
104 		pollfds[n] = pollfds[num_fds-1];
105 		fds[n] = fds[num_fds-1];
106 		assert(fds[n]->backend_info == num_fds-1);
/* Fix the moved entry's back-pointer to its new slot. */
107 		fds[n]->backend_info = n;
108 	} else if (num_fds == 1) {
109 		/* Free everything when no more fds. */
/* Mark as unregistered; backend_plan_changed checks for -1.
 * NOTE(review): backend_info is used as size_t above -- confirm its
 * declared type makes the -1 sentinel comparison well-defined. */
117 	fd->backend_info = -1;
/* Register a listening socket with the backend, polling for incoming
 * connections (POLLIN).  Returns false if add_fd fails to allocate. */
121 bool add_listener(struct io_listener *l)
123 	if (!add_fd(&l->fd, POLLIN))
/* A conn's io_plan changed: refresh its pollfd entry (events mask and
 * the idle/active fd-sign convention) to match the new plan. */
128 void backend_plan_changed(struct io_conn *conn)
132 	/* This can happen with debugging and delayed free... */
133 	if (conn->fd.backend_info == -1)
136 	pfd = &pollfds[conn->fd.backend_info];
/* NOTE(review): num_waiting bookkeeping around the old/new event
 * masks is presumably on the elided lines -- confirm. */
141 	pfd->events = conn->plan.pollflag;
/* Duplex conns share one pollfd slot: OR in the partner's interest. */
143 		int mask = conn->duplex->plan.pollflag;
144 		/* You can't *both* read/write. */
145 		assert(!mask || pfd->events != mask);
/* Any events -> active: store the real fd so poll(2) watches it. */
150 		pfd->fd = conn->fd.fd;
/* No events -> idle: negate the fd so poll(2) ignores this entry. */
152 		pfd->fd = -conn->fd.fd;
/* A plan with no continuation means the conn is closing; presumably
 * num_closing is bumped on the elided line -- confirm. */
154 	if (!conn->plan.next)
/* Register a new connection, polling per its initial plan.
 * Returns false if add_fd fails to allocate. */
158 bool add_conn(struct io_conn *c)
160 	if (!add_fd(&c->fd, c->plan.pollflag))
162 	/* Immediate close is allowed. */
/* Register the second conn of a duplex pair: it shares its partner's
 * pollfd slot (same backend_info) rather than getting its own, then
 * merges its poll interest via backend_plan_changed. */
168 bool add_duplex(struct io_conn *c)
170 	c->fd.backend_info = c->duplex->fd.backend_info;
171 	backend_plan_changed(c);
/* Tear down a closing conn: run its finish callback (with the errno
 * saved at close time), drop any pending timeout, and release or hand
 * over its pollfd slot. */
175 static void del_conn(struct io_conn *conn)
/* Restore the errno from when the close was triggered so the finish
 * callback sees the original failure cause. */
178 		errno = conn->plan.u.close.saved_errno;
179 		conn->finish(conn, conn->finish_arg);
181 	if (timeout_active(conn))
182 		backend_del_timeout(conn);
/* Duplex pair: the slot survives, owned by the remaining partner. */
185 		/* In case fds[] pointed to the other one. */
186 		fds[conn->fd.backend_info] = &conn->duplex->fd;
187 		conn->duplex->duplex = NULL;
188 		conn->fd.backend_info = -1;
/* Unregister a listener; presumably just del_fd on its fd.
 * NOTE(review): body elided from this view. */
194 void del_listener(struct io_listener *l)
/* Install a new plan on conn and propagate it to the poll backend.
 * NOTE(review): the plan assignment itself is on an elided line. */
199 static void set_plan(struct io_conn *conn, struct io_plan plan)
202 	backend_plan_changed(conn);
/* Accept one pending connection on listener l and hand it to the
 * listener's callback (on elided lines).  Peer address is discarded
 * (NULL, NULL). */
205 static void accept_conn(struct io_listener *l)
207 	int fd = accept(l->fd.fd, NULL, NULL);
209 	/* FIXME: What to do here? */
215 /* It's OK to miss some, as long as we make progress. */
/* Run finish/free processing for closing conns.  Bails out early if a
 * callback sets io_loop_return; the swap-with-last behaviour of del_fd
 * means skipped entries are revisited on the next call. */
216 static void finish_conns(void)
220 	for (i = 0; !io_loop_return && i < num_fds; i++) {
221 		struct io_conn *c, *duplex;
/* Listeners never "finish"; only conns do.
 * NOTE(review): the assignment of c from fds[i] is on an elided line. */
226 		if (fds[i]->listener)
/* Visit the conn, then (exactly once) its duplex partner. */
229 		for (duplex = c->duplex; c; c = duplex, duplex = NULL) {
/* Arm conn's timeout to fire `duration` from now.  Lazily initialises
 * the global timer list on first use (the guard condition is on an
 * elided line). */
239 void backend_add_timeout(struct io_conn *conn, struct timespec duration)
242 		timers_init(&timeouts, time_now());
243 	timer_add(&timeouts, &conn->timeout->timer,
244 		  time_add(time_now(), duration));
/* Linking the timeout back to its conn marks it active; presumably
 * timeout_active() tests this pointer -- see backend_del_timeout. */
245 	conn->timeout->conn = conn;
/* Disarm conn's timeout: remove it from the timer list and clear the
 * back-pointer that marks it active. */
248 void backend_del_timeout(struct io_conn *conn)
250 	assert(conn->timeout->conn == conn);
251 	timer_del(&timeouts, &conn->timeout->timer);
252 	conn->timeout->conn = NULL;
255 /* This is the main loop. */
/* NOTE(review): the io_loop() function header and many interior lines
 * are elided from this view; comments below describe only what the
 * visible lines establish.  Loops until some callback sets
 * io_loop_return, which is then handed back to the caller. */
262 	while (!io_loop_return) {
263 		int i, r, timeout = INT_MAX;
265 		bool some_timeouts = false;
268 			struct timespec first;
269 			struct list_head expired;
270 			struct io_timeout *t;
274 			/* Call functions for expired timers. */
275 			timers_expire(&timeouts, now, &expired);
276 			while ((t = list_pop(&expired, struct io_timeout, timer.list))) {
277 				struct io_conn *conn = t->conn;
278 				/* Clear, in case timer re-adds */
/* Fire the timeout continuation and adopt the plan it returns. */
281 				set_plan(conn, t->next(conn, t->next_arg));
282 				some_timeouts = true;
285 			/* Now figure out how long to wait for the next one. */
286 			if (timer_earliest(&timeouts, &first)) {
/* Convert the gap to milliseconds for poll(); presumably clamped to
 * INT_MAX on an elided line before use as the poll timeout. */
287 				uint64_t f = time_to_msec(time_sub(first, now));
295 		/* Could have started/finished more. */
299 		/* debug can recurse on io_loop; anything can change. */
300 		if (doing_debug() && some_timeouts)
306 		/* You can't tell them all to go to sleep! */
/* Block for events (or the computed timeout).
 * NOTE(review): poll() error handling (r < 0) is on elided lines. */
309 		r = poll(pollfds, num_fds, timeout);
/* Dispatch ready fds; stop early if a callback ends the loop. */
313 		for (i = 0; i < num_fds && !io_loop_return; i++) {
/* fds[i] is the first member of both io_listener and io_conn, so this
 * cast is valid either way; fds[i]->listener discriminates below. */
314 			struct io_conn *c = (void *)fds[i];
315 			int events = pollfds[i].revents;
320 			if (fds[i]->listener) {
321 				if (events & POLLIN) {
322 					accept_conn((void *)c);
325 			} else if (events & (POLLIN|POLLOUT)) {
/* Duplex: split the revents between the two partners by their
 * respective interest masks (handled on elided lines). */
328 					int mask = c->duplex->plan.pollflag;
332 					/* debug can recurse;
333 					 * anything can change. */
336 					if (!(events&(POLLIN|POLLOUT)))
341 				/* debug can recurse; anything can change. */
/* Error/hangup: force both sides of the fd to the close plan. */
344 			} else if (events & (POLLHUP|POLLNVAL|POLLERR)) {
348 				set_plan(c, io_close());
350 					set_current(c->duplex);
351 					set_plan(c->duplex, io_close());
/* Hand the value set by io_break() (or similar) back to the caller,
 * resetting it so the loop can be entered again. */
360 	ret = io_loop_return;
361 	io_loop_return = NULL;