1 /* Licensed under LGPLv2.1+ - see LICENSE file for details */
8 #include <sys/socket.h>
11 #include <ccan/time/time.h>
12 #include <ccan/timer/timer.h>
/* Backend state for the poll(2)-based event loop.
 * pollfds[] and fds[] are parallel arrays: fds[i]->backend_info == i
 * (maintained by add_fd/del_fd).  num_fds of max_fds slots are in use.
 * num_waiting presumably counts entries with events set -- TODO confirm,
 * its updates are not visible in this view. */
14 static size_t num_fds = 0, max_fds = 0, num_waiting = 0, num_always = 0, max_always = 0, num_exclusive = 0;
/* Parallel arrays: poll entries and the struct fd that owns each one. */
15 static struct pollfd *pollfds = NULL;
16 static struct fd **fds = NULL;
/* Plans in IO_ALWAYS state, serviced by handle_always(). */
17 static struct io_plan **always = NULL;
/* Time and poll functions; replaceable for testing via
 * io_time_override()/io_poll_override() below. */
18 static struct timemono (*nowfn)(void) = time_mono;
19 static int (*pollfn)(struct pollfd *fds, nfds_t nfds, int timeout) = poll;
/* Replace the monotonic clock used by the loop (e.g. for tests).
 * Saves the old function and returns it so the caller can restore it;
 * the assignment of the new function is on an elided line. */
21 struct timemono (*io_time_override(struct timemono (*now)(void)))(void)
23 struct timemono (*old)(void) = nowfn;
/* Replace the poll(2) implementation used by the loop (e.g. for tests).
 * Saves the old function and returns it so the caller can restore it;
 * the assignment of the new function is on an elided line. */
28 int (*io_poll_override(int (*poll)(struct pollfd *fds, nfds_t nfds, int timeout)))(struct pollfd *, nfds_t, int)
30 int (*old)(struct pollfd *fds, nfds_t nfds, int timeout) = pollfn;
/* Register fd in the pollfds[]/fds[] arrays with the given poll events.
 * Records the slot index in fd->backend_info and clears both exclusive
 * flags.  Returns false on allocation failure (failure returns are on
 * elided lines). */
35 static bool add_fd(struct fd *fd, short events)
/* First fd ever: allocate both arrays (capacity 8); fds[] is a tal
 * child of pollfds, so freeing pollfds frees both (see del_fd). */
39 pollfds = tal_arr(NULL, struct pollfd, 8);
42 fds = tal_arr(pollfds, struct fd *, 8);
/* Full: grow both arrays by doubling. */
48 if (num_fds + 1 > max_fds) {
49 size_t num = max_fds * 2;
51 if (!tal_resize(&pollfds, num))
53 if (!tal_resize(&fds, num))
58 pollfds[num_fds].events = events;
59 /* In case it's idle. */
/* Idle entries use the -fd-1 encoding: poll(2) ignores negative fds,
 * so the slot stays reserved without being watched. */
61 pollfds[num_fds].fd = -fd->fd - 1;
63 pollfds[num_fds].fd = fd->fd;
64 pollfds[num_fds].revents = 0; /* In case we're iterating now */
66 fd->backend_info = num_fds;
67 fd->exclusive[0] = fd->exclusive[1] = false;
/* Remove fd from the backend arrays, keeping them dense: the last
 * entry is moved into the vacated slot and its backend_info updated.
 * Frees the arrays entirely when the last fd goes away. */
75 static void del_fd(struct fd *fd)
77 size_t n = fd->backend_info;
/* Entry was being watched; presumably decrements num_waiting here
 * -- the statement itself is on an elided line. */
81 if (pollfds[n].events)
83 if (n != num_fds - 1) {
84 /* Move last one over us. */
85 pollfds[n] = pollfds[num_fds-1];
86 fds[n] = fds[num_fds-1];
87 assert(fds[n]->backend_info == num_fds-1);
88 fds[n]->backend_info = n;
89 } else if (num_fds == 1) {
90 /* Free everything when no more fds. */
/* fds[] is a tal child of pollfds, so this frees both arrays. */
91 pollfds = tal_free(pollfds);
94 if (num_always == 0) {
95 always = tal_free(always);
/* Poison the index so stale use is caught. */
100 fd->backend_info = -1;
/* Keep the global exclusive count in sync (decrements elided). */
102 if (fd->exclusive[IO_IN])
104 if (fd->exclusive[IO_OUT])
/* tal destructor for io_listener: presumably closes the fd and calls
 * del_fd -- body is elided in this view, confirm against full source. */
108 static void destroy_listener(struct io_listener *l)
/* Register a listening fd, watching POLLIN only (accept readiness),
 * and attach the destructor that tears it down on free.
 * Returns false if add_fd fails (return paths are on elided lines). */
114 bool add_listener(struct io_listener *l)
116 if (!add_fd(&l->fd, POLLIN))
118 tal_add_destructor(l, destroy_listener);
/* Linear search for plan in always[]; returns its index, or -1 when
 * absent (the -1 fallthrough is elided; callers such as
 * backend_new_always assert on it). */
122 static int find_always(const struct io_plan *plan)
126 for (i = 0; i < num_always; i++)
127 if (always[i] == plan)
/* Remove plan from always[] if it is in IO_ALWAYS state, filling the
 * hole with the last entry (order does not matter).  The array is
 * freed once neither always entries nor fds remain. */
132 static void remove_from_always(const struct io_plan *plan)
136 if (plan->status != IO_ALWAYS)
139 pos = find_always(plan);
142 /* Move last one down if we made a hole */
143 if (pos != num_always-1)
144 always[pos] = always[num_always-1];
147 /* Only free if no fds left either. */
148 if (num_always == 0 && max_fds == 0) {
149 always = tal_free(always);
/* Append plan to always[] (it must not already be there).  The array
 * is lazily allocated with capacity 8 and grows by doubling; returns
 * false on allocation failure (failure returns are on elided lines). */
154 bool backend_new_always(struct io_plan *plan)
/* Double-add would corrupt removal bookkeeping. */
156 assert(find_always(plan) == -1);
159 assert(num_always == 0);
160 always = tal_arr(NULL, struct io_plan *, 8);
166 if (num_always + 1 > max_always) {
167 size_t num = max_always * 2;
169 if (!tal_resize(&always, num))
174 always[num_always++] = plan;
/* Recompute a conn's pollfd from its two plans: POLLIN/POLLOUT are
 * requested for each direction that is in a polling state.  A conn
 * with no events is parked with the negative -fd-1 encoding so
 * poll(2) skips it. */
178 static void setup_pfd(struct io_conn *conn, struct pollfd *pfd)
/* pfd must be this conn's own slot. */
180 assert(pfd == &pollfds[conn->fd.backend_info]);
183 if (conn->plan[IO_IN].status == IO_POLLING_NOTSTARTED
184 || conn->plan[IO_IN].status == IO_POLLING_STARTED)
185 pfd->events |= POLLIN;
186 if (conn->plan[IO_OUT].status == IO_POLLING_NOTSTARTED
187 || conn->plan[IO_OUT].status == IO_POLLING_STARTED)
188 pfd->events |= POLLOUT;
/* Active: watch the real fd; idle: park it (negative fd). */
191 pfd->fd = conn->fd.fd;
193 pfd->fd = -conn->fd.fd - 1;
/* Called when a conn's plan changes: refresh its pollfd entry.
 * Presumably also adjusts num_waiting around the refresh -- those
 * statements are on elided lines. */
197 void backend_new_plan(struct io_conn *conn)
199 struct pollfd *pfd = &pollfds[conn->fd.backend_info];
204 setup_pfd(conn, pfd);
/* Wake every conn whose IN or OUT plan is IO_WAITING on this wait
 * object.  Scans all registered fds; listeners have no plans and are
 * skipped. */
210 void backend_wake(const void *wait)
214 for (i = 0; i < num_fds; i++) {
217 /* Ignore listeners */
218 if (fds[i]->listener)
/* Each direction wakes independently. */
222 if (c->plan[IO_IN].status == IO_WAITING
223 && c->plan[IO_IN].arg.u1.const_vp == wait)
224 io_do_wakeup(c, IO_IN);
226 if (c->plan[IO_OUT].status == IO_WAITING
227 && c->plan[IO_OUT].arg.u1.const_vp == wait)
228 io_do_wakeup(c, IO_OUT);
/* Common conn teardown: drop it from the always[] list, optionally
 * close the fd (elided; governed by close_fd), and invoke the user's
 * finish callback.  errno is preserved across the cleanup so finish
 * sees the error that caused the close. */
232 static void destroy_conn(struct io_conn *conn, bool close_fd)
234 int saved_errno = errno;
/* Either direction may have an IO_ALWAYS plan pending. */
240 remove_from_always(&conn->plan[IO_IN]);
241 remove_from_always(&conn->plan[IO_OUT]);
243 /* errno saved/restored by tal_free itself. */
246 conn->finish(conn, conn->finish_arg);
/* tal destructor for io_conn: full teardown including closing the fd. */
250 static void destroy_conn_close_fd(struct io_conn *conn)
252 destroy_conn(conn, true);
/* Register a conn's fd with no events yet (backend_new_plan sets them
 * once a plan exists) and attach the closing destructor.  Returns
 * false if add_fd fails (return paths are on elided lines). */
255 bool add_conn(struct io_conn *c)
257 if (!add_fd(&c->fd, 0))
259 tal_add_destructor(c, destroy_conn_close_fd);
/* Tear down conn but leave its fd open (caller keeps ownership):
 * detach the closing destructor first, then run the non-closing
 * teardown. */
263 void cleanup_conn_without_close(struct io_conn *conn)
265 tal_del_destructor(conn, destroy_conn_close_fd);
266 destroy_conn(conn, false);
/* Accept one pending connection on the listener and hand it to a new
 * io_conn built from the listener's init callback and argument.
 * accept() failure handling is elided (see the FIXME). */
269 static void accept_conn(struct io_listener *l)
271 int fd = accept(l->fd.fd, NULL, NULL);
273 /* FIXME: What to do here? */
277 io_new_conn(l->ctx, fd, l->init, l->arg);
280 /* Return pointer to exclusive flag for this plan. */
281 static bool *exclusive(struct io_plan *plan)
283 struct io_conn *conn;
/* plan is embedded in conn->plan[plan->dir]; recover the conn from
 * the member address. */
285 conn = container_of(plan, struct io_conn, plan[plan->dir]);
286 return &conn->fd.exclusive[plan->dir];
289 /* For simplicity, we do one always at a time */
/* Run one pending IO_ALWAYS plan, if any; returns whether one ran
 * (return statements are on elided lines).  When exclusive fds exist,
 * only exclusive plans are eligible. */
290 static bool handle_always(void)
294 /* Backwards is simpler: makes it easier to remove entries. */
295 for (i = num_always - 1; i >= 0; i--) {
296 struct io_plan *plan = always[i];
298 if (num_exclusive && !*exclusive(plan))
300 /* Remove first: it might re-add */
301 if (i != num_always-1)
302 always[i] = always[num_always-1];
/* Set or clear the exclusive flag for plan's direction.  The global
 * num_exclusive counter is adjusted when the flag actually changes
 * (the increment/decrement is on elided lines).  Returns whether any
 * exclusive entries remain afterwards. */
311 bool backend_set_exclusive(struct io_plan *plan, bool excl)
313 bool *excl_ptr = exclusive(plan);
315 if (excl != *excl_ptr) {
323 return num_exclusive != 0;
326 /* FIXME: We could do this once at set_exclusive time, and catch everywhere
327 * else that we manipulate events. */
/* Before poll(2), when exclusive entries exist: strip POLLIN/POLLOUT
 * from all non-exclusive entries so only exclusive fds can fire, and
 * park fully-silenced entries with the -fd-1 encoding.  Reversed by
 * restore_pollfds() after the poll. */
328 static void exclude_pollfds(void)
/* Fast path: nothing exclusive, arrays untouched. */
332 if (num_exclusive == 0)
335 for (i = 0; i < num_fds; i++) {
336 struct pollfd *pfd = &pollfds[fds[i]->backend_info];
338 if (!fds[i]->exclusive[IO_IN])
339 pfd->events &= ~POLLIN;
340 if (!fds[i]->exclusive[IO_OUT])
341 pfd->events &= ~POLLOUT;
343 /* If we're not listening, we don't want error events
346 pfd->fd = -fds[i]->fd - 1;
/* Undo exclude_pollfds() after poll(2): listeners go back to plain
 * POLLIN on their real fd; conns get their events recomputed from
 * their plans via setup_pfd(). */
350 static void restore_pollfds(void)
/* Fast path: exclude_pollfds() changed nothing. */
354 if (num_exclusive == 0)
357 for (i = 0; i < num_fds; i++) {
358 struct pollfd *pfd = &pollfds[fds[i]->backend_info];
360 if (fds[i]->listener) {
361 pfd->events = POLLIN;
362 pfd->fd = fds[i]->fd;
364 struct io_conn *conn = (void *)fds[i];
365 setup_pfd(conn, pfd);
/* This is the main loop: run IO_ALWAYS plans, expire timers, then
 * poll(2) the registered fds and service whatever became ready,
 * repeating until io_break() sets io_loop_return.  Returns the value
 * passed to io_break(); if timers is non-NULL, *expired reports the
 * timers that fired. */
371 void *io_loop(struct timers *timers, struct timer **expired)
375 /* if timers is NULL, expired must be. If not, not. */
376 assert(!timers == !expired);
378 /* Make sure this is NULL if we exit for some other reason. */
382 while (!io_loop_return) {
383 int i, r, ms_timeout = -1;
385 if (handle_always()) {
386 /* Could have started/finished more. */
390 /* Everything closed? */
394 /* You can't tell them all to go to sleep! */
398 struct timemono now, first;
402 /* Call functions for expired timers. */
403 *expired = timers_expire(timers, now);
407 /* Now figure out how long to wait for the next one. */
408 if (timer_earliest(timers, &first)) {
/* Milliseconds until the earliest timer; clamped because
 * poll(2) takes an int timeout. */
410 next = time_to_msec(timemono_between(first, now));
414 ms_timeout = INT_MAX;
418 /* We do this temporarily, assuming exclusive is unusual */
420 r = pollfn(pollfds, num_fds, ms_timeout);
424 /* Signals shouldn't break us, unless they set
431 for (i = 0; i < num_fds && !io_loop_return; i++) {
432 struct io_conn *c = (void *)fds[i];
433 int events = pollfds[i].revents;
435 /* Clear so we don't get confused if exclusive next time */
436 pollfds[i].revents = 0;
441 if (fds[i]->listener) {
442 struct io_listener *l = (void *)fds[i];
443 if (events & POLLIN) {
/* Error on the listening socket itself: give up on it. */
446 } else if (events & (POLLHUP|POLLNVAL|POLLERR)) {
449 io_close_listener(l);
/* Readable/writable conn (dispatch is on elided lines). */
451 } else if (events & (POLLIN|POLLOUT)) {
/* Hangup/error on a conn (handling is on elided lines). */
454 } else if (events & (POLLHUP|POLLNVAL|POLLERR)) {
/* Hand back the io_break() value; reset for the next io_loop call. */
462 ret = io_loop_return;
463 io_loop_return = NULL;
468 const void *io_have_fd(int fd, bool *listener)
470 for (size_t i = 0; i < num_fds; i++) {
471 if (fds[i]->fd != fd)
474 *listener = fds[i]->listener;