1 /* Licensed under LGPLv2.1+ - see LICENSE file for details */
8 #include <sys/socket.h>
11 #include <ccan/time/time.h>
12 #include <ccan/timer/timer.h>
/* Backend bookkeeping.  pollfds[] and fds[] are parallel arrays indexed by
 * fd->backend_info (see add_fd/del_fd below); always[] queues plans that run
 * without polling.  Each num_X counter tracks used slots, each max_X the
 * allocated capacity of the matching array. */
14 static size_t num_fds = 0, max_fds = 0, num_waiting = 0, num_always = 0, max_always = 0, num_exclusive = 0;
15 static struct pollfd *pollfds = NULL;
16 static struct fd **fds = NULL;
17 static struct io_plan **always = NULL;
/* Clock and poll entry points, indirected so tests can substitute them via
 * io_time_override() / io_poll_override() below. */
18 static struct timemono (*nowfn)(void) = time_mono;
19 static int (*pollfn)(struct pollfd *fds, nfds_t nfds, int timeout) = poll;
/* Test hook: install a replacement monotonic-clock function.  The previous
 * function is captured in `old` — presumably returned to the caller after the
 * swap (tail of body elided from this view). */
21 struct timemono (*io_time_override(struct timemono (*now)(void)))(void)
23 struct timemono (*old)(void) = nowfn;
/* Test hook: install a replacement for poll(2) itself.  Mirrors
 * io_time_override(): saves the old function pointer, presumably returned
 * after installing the new one (tail of body elided from this view). */
28 int (*io_poll_override(int (*poll)(struct pollfd *fds, nfds_t nfds, int timeout)))(struct pollfd *, nfds_t, int)
30 int (*old)(struct pollfd *fds, nfds_t nfds, int timeout) = pollfn;
/* Register a descriptor with the backend: append a slot to the parallel
 * pollfds[]/fds[] arrays and record its index in fd->backend_info.
 * Returns false on allocation failure (failure returns elided). */
35 static bool add_fd(struct fd *fd, short events)
/* First use: allocate both arrays at capacity 8.  fds[] is a tal child
 * of pollfds, so freeing pollfds frees both (see del_fd). */
39 pollfds = tal_arr(NULL, struct pollfd, 8);
42 fds = tal_arr(pollfds, struct fd *, 8);
/* Grow both arrays together by doubling when full. */
48 if (num_fds + 1 > max_fds) {
49 size_t num = max_fds * 2;
51 if (!tal_resize(&pollfds, num))
53 if (!tal_resize(&fds, num))
58 pollfds[num_fds].events = events;
59 /* In case it's idle. */
/* Idle entries are parked as -fd-1: poll(2) ignores negative fd fields,
 * and the encoding is reversible even for fd 0 (cf. setup_pfd). */
61 pollfds[num_fds].fd = -fd->fd - 1;
63 pollfds[num_fds].fd = fd->fd;
64 pollfds[num_fds].revents = 0; /* In case we're iterating now */
/* Back-pointer so del_fd()/setup_pfd() can locate this slot in O(1). */
66 fd->backend_info = num_fds;
67 fd->exclusive[0] = fd->exclusive[1] = false;
/* Unregister a descriptor: remove its slot from the parallel arrays,
 * keeping them dense by moving the last entry into the hole. */
75 static void del_fd(struct fd *fd)
77 size_t n = fd->backend_info;
/* Entry had events set, so it was counted as waiting — the elided body
 * presumably decrements num_waiting; confirm against full source. */
81 if (pollfds[n].events)
83 if (n != num_fds - 1) {
84 /* Move last one over us. */
85 pollfds[n] = pollfds[num_fds-1];
86 fds[n] = fds[num_fds-1];
87 assert(fds[n]->backend_info == num_fds-1);
88 fds[n]->backend_info = n;
89 } else if (num_fds == 1) {
90 /* Free everything when no more fds. */
91 pollfds = tal_free(pollfds);
/* NOTE(review): backend_info is size_t (line above: `size_t n`), so -1
 * wraps to SIZE_MAX; it acts as a "not registered" sentinel. */
96 fd->backend_info = -1;
/* Exclusive directions are counted globally; the elided branch bodies
 * presumably decrement num_exclusive — confirm against full source. */
98 if (fd->exclusive[IO_IN])
100 if (fd->exclusive[IO_OUT])
/* tal destructor attached to listeners by add_listener() below.  Body is
 * elided here — presumably closes the socket and del_fd()s it; confirm. */
104 static void destroy_listener(struct io_listener *l)
/* Register a listening socket; returns false if add_fd() fails. */
110 bool add_listener(struct io_listener *l)
/* Listeners only ever wait for incoming connections. */
112 if (!add_fd(&l->fd, POLLIN))
/* Clean up backend state automatically when the listener is freed. */
114 tal_add_destructor(l, destroy_listener);
/* Linear search of always[] for this plan; returns its index, or -1 when
 * absent (the -1 return is elided, but backend_new_always() asserts it). */
118 static int find_always(const struct io_plan *plan)
120 for (size_t i = 0; i < num_always; i++)
121 if (always[i] == plan)
/* Remove a plan from always[] if it is queued there. */
126 static void remove_from_always(const struct io_plan *plan)
/* Only plans in IO_ALWAYS state live in always[]; anything else is a no-op. */
130 if (plan->status != IO_ALWAYS)
133 pos = find_always(plan);
136 /* Move last one down if we made a hole */
137 if (pos != num_always-1)
138 always[pos] = always[num_always-1];
/* Queue a plan to run "always" (without waiting for poll()).
 * Returns false on allocation failure (failure returns elided). */
142 bool backend_new_always(struct io_plan *plan)
/* Must not already be queued. */
144 assert(find_always(plan) == -1);
/* Lazily allocate at capacity 8, then grow by doubling — same scheme
 * as add_fd(). */
147 assert(num_always == 0);
148 always = tal_arr(NULL, struct io_plan *, 8);
154 if (num_always + 1 > max_always) {
155 size_t num = max_always * 2;
157 if (!tal_resize(&always, num))
162 always[num_always++] = plan;
/* Recompute a connection's pollfd entry (events and fd encoding) from the
 * current state of its two plans. */
166 static void setup_pfd(struct io_conn *conn, struct pollfd *pfd)
168 assert(pfd == &pollfds[conn->fd.backend_info]);
/* Poll a direction only while its plan is actively polling. */
171 if (conn->plan[IO_IN].status == IO_POLLING_NOTSTARTED
172 || conn->plan[IO_IN].status == IO_POLLING_STARTED)
173 pfd->events |= POLLIN;
174 if (conn->plan[IO_OUT].status == IO_POLLING_NOTSTARTED
175 || conn->plan[IO_OUT].status == IO_POLLING_STARTED)
176 pfd->events |= POLLOUT;
/* With no events wanted, park the fd as -fd-1 so poll(2) skips it
 * entirely (the guarding condition is elided here). */
179 pfd->fd = conn->fd.fd;
181 pfd->fd = -conn->fd.fd - 1;
/* A connection's plan changed: refresh its pollfd entry.  Elided lines
 * presumably adjust num_waiting based on old vs. new events — confirm. */
185 void backend_new_plan(struct io_conn *conn)
187 struct pollfd *pfd = &pollfds[conn->fd.backend_info];
192 setup_pfd(conn, pfd);
/* Wake every connection direction whose plan is IO_WAITING on this
 * wait object. */
198 void backend_wake(const void *wait)
/* Scan all registered fds (loop index declaration elided). */
202 for (i = 0; i < num_fds; i++) {
205 /* Ignore listeners */
206 if (fds[i]->listener)
/* `c` is the io_conn for fds[i]; its declaration/cast is elided. */
210 if (c->plan[IO_IN].status == IO_WAITING
211 && c->plan[IO_IN].arg.u1.const_vp == wait)
212 io_do_wakeup(c, IO_IN);
214 if (c->plan[IO_OUT].status == IO_WAITING
215 && c->plan[IO_OUT].arg.u1.const_vp == wait)
216 io_do_wakeup(c, IO_OUT);
/* Common teardown for a connection; close_fd selects whether the
 * underlying descriptor is also closed. */
220 static void destroy_conn(struct io_conn *conn, bool close_fd)
/* Preserve the errno of whatever failure triggered the teardown —
 * presumably restored before the finish callback runs (elided). */
222 int saved_errno = errno;
/* Either direction may still be queued in always[]. */
228 remove_from_always(&conn->plan[IO_IN]);
229 remove_from_always(&conn->plan[IO_OUT]);
231 /* errno saved/restored by tal_free itself. */
234 conn->finish(conn, conn->finish_arg);
/* tal destructor for normal connections: full teardown including close. */
238 static void destroy_conn_close_fd(struct io_conn *conn)
240 destroy_conn(conn, true);
/* Register a new connection; returns false if add_fd() fails. */
243 bool add_conn(struct io_conn *c)
/* Start with no events; backend_new_plan() will set them later. */
245 if (!add_fd(&c->fd, 0))
247 tal_add_destructor(c, destroy_conn_close_fd);
/* Tear down a connection but leave its fd open (caller keeps ownership):
 * detach the closing destructor first, then destroy without close. */
251 void cleanup_conn_without_close(struct io_conn *conn)
253 tal_del_destructor(conn, destroy_conn_close_fd);
254 destroy_conn(conn, false);
/* A listener became readable: accept one connection and wrap it in a new
 * io_conn using the listener's init callback and argument. */
257 static void accept_conn(struct io_listener *l)
259 int fd = accept(l->fd.fd, NULL, NULL);
/* accept() failure handling is unresolved (see FIXME). */
261 /* FIXME: What to do here? */
265 io_new_conn(l->ctx, fd, l->init, l->arg);
268 /* Return pointer to exclusive flag for this plan. */
269 static bool *exclusive(struct io_plan *plan)
271 struct io_conn *conn;
/* Recover the owning conn: the plan is embedded at conn->plan[plan->dir]. */
273 conn = container_of(plan, struct io_conn, plan[plan->dir]);
274 return &conn->fd.exclusive[plan->dir];
277 /* For simplicity, we do one always at a time */
278 static bool handle_always(void)
280 /* Backwards is simple easier to remove entries */
281 for (int i = num_always - 1; i >= 0; i--) {
282 struct io_plan *plan = always[i];
/* While any plan is exclusive, non-exclusive always-plans are skipped. */
284 if (num_exclusive && !*exclusive(plan))
286 /* Remove first: it might re-add */
287 if (i != num_always-1)
288 always[i] = always[num_always-1];
/* Set or clear a plan's exclusive flag; returns whether any exclusive
 * plans remain afterwards.  The elided branch body presumably updates
 * num_exclusive to match — confirm against full source. */
297 bool backend_set_exclusive(struct io_plan *plan, bool excl)
299 bool *excl_ptr = exclusive(plan);
301 if (excl != *excl_ptr) {
309 return num_exclusive != 0;
312 /* FIXME: We could do this once at set_exclusive time, and catch everywhere
313  * else that we manipulate events. */
/* Before poll(): in exclusive mode, strip events from every non-exclusive
 * direction so only exclusive plans can fire.  Undone by restore_pollfds(). */
314 static void exclude_pollfds(void)
/* No-op unless someone is exclusive. */
316 if (num_exclusive == 0)
319 for (size_t i = 0; i < num_fds; i++) {
320 struct pollfd *pfd = &pollfds[fds[i]->backend_info];
322 if (!fds[i]->exclusive[IO_IN])
323 pfd->events &= ~POLLIN;
324 if (!fds[i]->exclusive[IO_OUT])
325 pfd->events &= ~POLLOUT;
327 /* If we're not listening, we don't want error events
/* Fully-masked entries are parked via the -fd-1 encoding (cf. add_fd). */
330 pfd->fd = -fds[i]->fd - 1;
/* After poll(): undo exclude_pollfds() by rebuilding each entry from its
 * owner's real state. */
334 static void restore_pollfds(void)
/* Nothing was excluded if nobody is exclusive. */
336 if (num_exclusive == 0)
339 for (size_t i = 0; i < num_fds; i++) {
340 struct pollfd *pfd = &pollfds[fds[i]->backend_info];
342 if (fds[i]->listener) {
/* Listeners always watch for incoming connections. */
343 pfd->events = POLLIN;
344 pfd->fd = fds[i]->fd;
/* Connections are recomputed from their plans. */
346 struct io_conn *conn = (void *)fds[i];
347 setup_pfd(conn, pfd);
352 /* This is the main loop. */
/* Repeats until io_loop_return is set: run always-plans, expire timers,
 * poll(), then dispatch revents.  Returns the value passed to io_break()
 * (function continues past the end of this view). */
353 void *io_loop(struct timers *timers, struct timer **expired)
357 /* if timers is NULL, expired must be. If not, not. */
358 assert(!timers == !expired);
360 /* Make sure this is NULL if we exit for some other reason. */
364 while (!io_loop_return) {
/* NOTE(review): `i` is int but num_fds is size_t — signed/unsigned
 * comparison; harmless only while num_fds <= INT_MAX. */
365 int i, r, ms_timeout = -1;
/* Always-plans may add/remove fds, so restart the cycle after any ran. */
367 if (handle_always()) {
368 /* Could have started/finished more. */
372 /* Everything closed? */
376 /* You can't tell them all to go to sleep! */
380 struct timemono now, first;
384 /* Call functions for expired timers. */
385 *expired = timers_expire(timers, now);
389 /* Now figure out how long to wait for the next one. */
390 if (timer_earliest(timers, &first)) {
/* Convert the gap to poll(2)'s millisecond timeout. */
392 next = time_to_msec(timemono_between(first, now));
396 ms_timeout = INT_MAX;
400 /* We do this temporarily, assuming exclusive is unusual */
402 r = pollfn(pollfds, num_fds, ms_timeout);
406 /* Signals shouldn't break us, unless they set
/* Dispatch: walk every fd, stopping early if a handler broke the loop. */
413 for (i = 0; i < num_fds && !io_loop_return; i++) {
414 struct io_conn *c = (void *)fds[i];
415 int events = pollfds[i].revents;
417 /* Clear so we don't get confused if exclusive next time */
418 pollfds[i].revents = 0;
423 if (fds[i]->listener) {
424 struct io_listener *l = (void *)fds[i];
425 if (events & POLLIN) {
428 } else if (events & (POLLHUP|POLLNVAL|POLLERR)) {
/* Dead listener: drop it. */
431 io_close_listener(l);
433 } else if (events & (POLLIN|POLLOUT)) {
436 } else if (events & (POLLHUP|POLLNVAL|POLLERR)) {
/* Hand back the io_break() value and reset for the next io_loop() call. */
444 ret = io_loop_return;
445 io_loop_return = NULL;