1 /* Licensed under LGPLv2.1+ - see LICENSE file for details */
8 #include <sys/socket.h>
11 #include <ccan/time/time.h>
12 #include <ccan/timer/timer.h>
/* File-scope state for the poll(2)-based io backend.
 * num_fds/max_fds track the parallel arrays below; num_waiting,
 * num_always/max_always and num_exclusive are counters maintained by
 * the add/remove helpers further down (some updates not visible here). */
14 static size_t num_fds = 0, max_fds = 0, num_waiting = 0, num_always = 0, max_always = 0, num_exclusive = 0;
/* Parallel arrays: pollfds[i] is the poll(2) entry for fds[i].
 * fds is allocated as a tal child of pollfds (see add_fd). */
15 static struct pollfd *pollfds = NULL;
16 static struct fd **fds = NULL;
/* Plans in IO_ALWAYS state, run each loop iteration by handle_always(). */
17 static struct io_plan **always = NULL;
/* Overridable hooks for the clock and for poll itself (see the
 * io_time_override / io_poll_override setters below). */
18 static struct timemono (*nowfn)(void) = time_mono;
19 static int (*pollfn)(struct pollfd *fds, nfds_t nfds, int timeout) = poll;
/* Replace the monotonic-clock function used by the loop (handy for tests);
 * returns the previously installed function so the caller can restore it.
 * NOTE(review): the assignment/return lines are not visible in this extract. */
21 struct timemono (*io_time_override(struct timemono (*now)(void)))(void)
23 struct timemono (*old)(void) = nowfn;
/* Replace the poll(2) implementation used by the loop (handy for tests);
 * returns the previously installed function so the caller can restore it.
 * NOTE(review): the assignment/return lines are not visible in this extract. */
28 int (*io_poll_override(int (*poll)(struct pollfd *fds, nfds_t nfds, int timeout)))(struct pollfd *, nfds_t, int)
30 int (*old)(struct pollfd *fds, nfds_t nfds, int timeout) = pollfn;
/* Register fd in the backend's pollfds/fds arrays with the given initial
 * poll events.  Presumably returns false on allocation failure — the
 * return/early-exit lines are not visible in this extract. */
35 static bool add_fd(struct fd *fd, short events)
/* First fd ever: allocate initial arrays of 8 entries; fds is a tal
 * child of pollfds so both are freed together (see del_fd). */
39 pollfds = tal_arr(NULL, struct pollfd, 8);
42 fds = tal_arr(pollfds, struct fd *, 8);
/* Grow both parallel arrays geometrically when full. */
48 if (num_fds + 1 > max_fds) {
49 size_t num = max_fds * 2;
51 if (!tal_resize(&pollfds, num))
53 if (!tal_resize(&fds, num))
58 pollfds[num_fds].events = events;
59 /* In case it's idle. */
/* A negative fd makes poll(2) skip this entry; -fd-1 is reversible
 * so the real descriptor can be restored later (see setup_pfd). */
61 pollfds[num_fds].fd = -fd->fd - 1;
63 pollfds[num_fds].fd = fd->fd;
64 pollfds[num_fds].revents = 0; /* In case we're iterating now */
/* Record our array index so del_fd can swap-remove in O(1). */
66 fd->backend_info = num_fds;
67 fd->exclusive[0] = fd->exclusive[1] = false;
/* Unregister fd from the backend arrays, keeping indices dense via
 * swap-with-last removal. */
75 static void del_fd(struct fd *fd)
77 size_t n = fd->backend_info;
/* Entry had active events; presumably num_waiting is decremented in the
 * (not visible) body — TODO confirm against the full source. */
81 if (pollfds[n].events)
83 if (n != num_fds - 1) {
84 /* Move last one over us. */
85 pollfds[n] = pollfds[num_fds-1];
86 fds[n] = fds[num_fds-1];
87 assert(fds[n]->backend_info == num_fds-1);
88 fds[n]->backend_info = n;
89 } else if (num_fds == 1) {
90 /* Free everything when no more fds. */
/* fds is a tal child of pollfds, so this frees both arrays. */
91 pollfds = tal_free(pollfds);
94 if (num_always == 0) {
95 always = tal_free(always);
/* Poison the stored index to catch stale use after removal. */
100 fd->backend_info = -1;
/* Presumably drops num_exclusive for each direction that was exclusive —
 * the decrement lines are not visible here. */
102 if (fd->exclusive[IO_IN])
104 if (fd->exclusive[IO_OUT])
/* tal destructor for io_listener: presumably closes/removes its fd from
 * the backend — body not visible in this extract. */
108 static void destroy_listener(struct io_listener *l)
/* Register a listener's fd for POLLIN and attach its cleanup destructor.
 * Returns false if add_fd fails (failure-return line not visible here). */
114 bool add_listener(struct io_listener *l)
116 if (!add_fd(&l->fd, POLLIN))
/* Ensure the fd is torn down when the listener is tal_free'd. */
118 tal_add_destructor(l, destroy_listener);
/* Linear search for plan in the always[] array; returns its index, and
 * presumably -1 when absent (see the assert in backend_new_always). */
122 static int find_always(const struct io_plan *plan)
124 for (size_t i = 0; i < num_always; i++)
125 if (always[i] == plan)
/* Remove plan from the always[] array if it is currently IO_ALWAYS,
 * using swap-with-last to keep the array dense. */
130 static void remove_from_always(const struct io_plan *plan)
/* Not an always plan: nothing to remove. */
134 if (plan->status != IO_ALWAYS)
137 pos = find_always(plan);
140 /* Move last one down if we made a hole */
141 if (pos != num_always-1)
142 always[pos] = always[num_always-1];
145 /* Only free if no fds left either. */
146 if (num_always == 0 && max_fds == 0) {
147 always = tal_free(always);
/* Append plan to the always[] array (it must not already be there).
 * Presumably returns false on allocation failure — return lines are not
 * visible in this extract. */
152 bool backend_new_always(struct io_plan *plan)
154 assert(find_always(plan) == -1);
/* First always plan: allocate the initial array of 8. */
157 assert(num_always == 0);
158 always = tal_arr(NULL, struct io_plan *, 8);
/* Grow geometrically when full. */
164 if (num_always + 1 > max_always) {
165 size_t num = max_always * 2;
167 if (!tal_resize(&always, num))
172 always[num_always++] = plan;
/* Recompute pfd->events (and the ignore-entry trick on pfd->fd) from the
 * connection's current IO_IN/IO_OUT plan statuses. */
176 static void setup_pfd(struct io_conn *conn, struct pollfd *pfd)
/* pfd must be this conn's own slot in the backend array. */
178 assert(pfd == &pollfds[conn->fd.backend_info]);
/* Poll for readability/writability only while a plan is in one of the
 * two polling states for that direction. */
181 if (conn->plan[IO_IN].status == IO_POLLING_NOTSTARTED
182 || conn->plan[IO_IN].status == IO_POLLING_STARTED)
183 pfd->events |= POLLIN;
184 if (conn->plan[IO_OUT].status == IO_POLLING_NOTSTARTED
185 || conn->plan[IO_OUT].status == IO_POLLING_STARTED)
186 pfd->events |= POLLOUT;
/* Active: expose the real fd; idle: negate it (-fd-1) so poll(2)
 * ignores the entry entirely. */
189 pfd->fd = conn->fd.fd;
191 pfd->fd = -conn->fd.fd - 1;
/* Called when a connection's plan changes: refresh its pollfd entry.
 * NOTE(review): lines between the lookup and the setup_pfd call (likely
 * counter bookkeeping) are not visible in this extract. */
195 void backend_new_plan(struct io_conn *conn)
197 struct pollfd *pfd = &pollfds[conn->fd.backend_info];
202 setup_pfd(conn, pfd);
/* Wake every connection whose IO_IN or IO_OUT plan is blocked in
 * io_wait() on this wait object. */
208 void backend_wake(const void *wait)
212 for (i = 0; i < num_fds; i++) {
215 /* Ignore listeners */
216 if (fds[i]->listener)
/* A plan is woken only if it is IO_WAITING *and* waiting on exactly
 * this object. */
220 if (c->plan[IO_IN].status == IO_WAITING
221 && c->plan[IO_IN].arg.u1.const_vp == wait)
222 io_do_wakeup(c, IO_IN);
224 if (c->plan[IO_OUT].status == IO_WAITING
225 && c->plan[IO_OUT].arg.u1.const_vp == wait)
226 io_do_wakeup(c, IO_OUT);
/* Common teardown for a connection: detach from the always[] array, run
 * the user's finish callback, and (per close_fd) close the descriptor —
 * the close/del_fd lines themselves are not visible in this extract. */
230 static void destroy_conn(struct io_conn *conn, bool close_fd)
/* Preserve errno across teardown so the finish callback sees the
 * errno that caused the close. */
232 int saved_errno = errno;
/* Either direction's plan may be registered as an always plan. */
238 remove_from_always(&conn->plan[IO_IN]);
239 remove_from_always(&conn->plan[IO_OUT]);
241 /* errno saved/restored by tal_free itself. */
244 conn->finish(conn, conn->finish_arg);
/* tal destructor variant: destroy the connection and close its fd. */
248 static void destroy_conn_close_fd(struct io_conn *conn)
250 destroy_conn(conn, true);
/* Register a connection's fd with no events yet (plans set them later via
 * backend_new_plan) and attach the closing destructor.  Presumably returns
 * false if add_fd fails — return lines not visible here. */
253 bool add_conn(struct io_conn *c)
255 if (!add_fd(&c->fd, 0))
257 tal_add_destructor(c, destroy_conn_close_fd);
/* Tear down a connection but leave its file descriptor open: drop the
 * fd-closing destructor first, then run the common teardown. */
261 void cleanup_conn_without_close(struct io_conn *conn)
263 tal_del_destructor(conn, destroy_conn_close_fd);
264 destroy_conn(conn, false);
/* Accept one pending connection on a listener and wrap it in a new
 * io_conn using the listener's init callback and argument. */
267 static void accept_conn(struct io_listener *l)
268 {
269 int fd = accept(l->fd.fd, NULL, NULL);
271 /* FIXME: What to do here? */
/* NOTE(review): the accept-failure handling between these lines is not
 * visible in this extract. */
275 io_new_conn(l->ctx, fd, l->init, l->arg);
278 /* Return pointer to exclusive flag for this plan. */
279 static bool *exclusive(struct io_plan *plan)
281 struct io_conn *conn;
/* plan is embedded in its conn at plan[plan->dir]; recover the conn. */
283 conn = container_of(plan, struct io_conn, plan[plan->dir]);
284 return &conn->fd.exclusive[plan->dir];
287 /* For simplicity, we do one always at a time */
/* Run pending IO_ALWAYS callbacks; presumably returns true if any ran
 * (the return lines are not visible in this extract). */
288 static bool handle_always(void)
290 /* Backwards is simple easier to remove entries */
291 for (int i = num_always - 1; i >= 0; i--) {
292 struct io_plan *plan = always[i];
/* In exclusive mode, only exclusive plans may run. */
294 if (num_exclusive && !*exclusive(plan))
296 /* Remove first: it might re-add */
/* Swap-with-last removal, matching remove_from_always. */
297 if (i != num_always-1)
298 always[i] = always[num_always-1];
/* Set or clear the exclusive flag for this plan's direction; returns
 * whether any exclusive plans remain.  NOTE(review): the num_exclusive
 * increment/decrement inside the if-body is not visible in this extract. */
307 bool backend_set_exclusive(struct io_plan *plan, bool excl)
309 bool *excl_ptr = exclusive(plan);
/* Only adjust the global count on an actual state change. */
311 if (excl != *excl_ptr) {
319 return num_exclusive != 0;
322 /* FIXME: We could do this once at set_exclusive time, and catch everywhere
323 * else that we manipulate events. */
/* Temporarily mask out poll events for all non-exclusive fds, so an
 * exclusive connection monopolizes the loop.  Undone by restore_pollfds. */
324 static void exclude_pollfds(void)
/* Fast path: nothing exclusive, nothing to mask. */
326 if (num_exclusive == 0)
329 for (size_t i = 0; i < num_fds; i++) {
330 struct pollfd *pfd = &pollfds[fds[i]->backend_info];
332 if (!fds[i]->exclusive[IO_IN])
333 pfd->events &= ~POLLIN;
334 if (!fds[i]->exclusive[IO_OUT])
335 pfd->events &= ~POLLOUT;
337 /* If we're not listening, we don't want error events
/* Negated fd (-fd-1) makes poll(2) skip the entry entirely. */
340 pfd->fd = -fds[i]->fd - 1;
/* Undo exclude_pollfds: recompute every pollfd entry from its owner's
 * current state. */
344 static void restore_pollfds(void)
/* Nothing was masked if there were no exclusive plans. */
346 if (num_exclusive == 0)
349 for (size_t i = 0; i < num_fds; i++) {
350 struct pollfd *pfd = &pollfds[fds[i]->backend_info];
/* Listeners always poll for POLLIN on their real fd; connections are
 * rebuilt from their plan states. */
352 if (fds[i]->listener) {
353 pfd->events = POLLIN;
354 pfd->fd = fds[i]->fd;
356 struct io_conn *conn = (void *)fds[i];
357 setup_pfd(conn, pfd);
362 /* This is the main loop. */
/* Run until io_break sets io_loop_return: dispatch always-plans, expire
 * timers, poll the registered fds, and route events to listeners and
 * connections.  Returns the value passed to io_break.
 * NOTE(review): many body lines (continue/break statements, closing
 * braces, error paths) are not visible in this extract. */
363 void *io_loop(struct timers *timers, struct timer **expired)
367 /* if timers is NULL, expired must be. If not, not. */
368 assert(!timers == !expired);
370 /* Make sure this is NULL if we exit for some other reason. */
374 while (!io_loop_return) {
375 int i, r, ms_timeout = -1;
/* Always-plans may create or destroy fds, so restart the iteration. */
377 if (handle_always()) {
378 /* Could have started/finished more. */
382 /* Everything closed? */
386 /* You can't tell them all to go to sleep! */
390 struct timemono now, first;
394 /* Call functions for expired timers. */
395 *expired = timers_expire(timers, now);
399 /* Now figure out how long to wait for the next one. */
400 if (timer_earliest(timers, &first)) {
/* Sleep at most until the earliest pending timer. */
402 next = time_to_msec(timemono_between(first, now));
406 ms_timeout = INT_MAX;
410 /* We do this temporarily, assuming exclusive is unusual */
412 r = pollfn(pollfds, num_fds, ms_timeout);
416 /* Signals shouldn't break us, unless they set
423 for (i = 0; i < num_fds && !io_loop_return; i++) {
424 struct io_conn *c = (void *)fds[i];
425 int events = pollfds[i].revents;
427 /* Clear so we don't get confused if exclusive next time */
428 pollfds[i].revents = 0;
433 if (fds[i]->listener) {
434 struct io_listener *l = (void *)fds[i];
/* Readable listener: a connection is ready to accept. */
435 if (events & POLLIN) {
/* Error on the listening socket itself: drop the listener. */
438 } else if (events & (POLLHUP|POLLNVAL|POLLERR)) {
441 io_close_listener(l);
/* Connection fd is readable/writable: run its io plan(s). */
443 } else if (events & (POLLIN|POLLOUT)) {
446 } else if (events & (POLLHUP|POLLNVAL|POLLERR)) {
/* Hand back the io_break value and reset for the next io_loop call. */
454 ret = io_loop_return;
455 io_loop_return = NULL;