ccan/io/poll.c
/* Licensed under LGPLv2.1+ - see LICENSE file for details */
#include "io.h"
#include "backend.h"
#include <assert.h>
#include <poll.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
#include <limits.h>

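/* Parallel arrays: pollfds[i] and fds[i] describe the same descriptor,
 * and each fd records its index in fd->backend_info.  num_waiting counts
 * entries with non-zero poll events; num_next and num_finished count
 * connections whose plans are in IO_NEXT or IO_FINISHED and need
 * servicing outside poll(). */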
static size_t num_fds = 0, max_fds = 0, num_next = 0, num_finished = 0, num_waiting = 0;
static struct pollfd *pollfds = NULL;
static struct fd **fds = NULL;
static struct timers timeouts;

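/* Register a descriptor with the backend, growing the arrays (doubling
 * from an initial 8 slots) as needed. */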
static bool add_fd(struct fd *fd, short events)
{
        if (num_fds + 1 > max_fds) {
                struct pollfd *newpollfds;
                struct fd **newfds;
                size_t num = max_fds ? max_fds * 2 : 8;

                newpollfds = realloc(pollfds, sizeof(*newpollfds) * num);
                if (!newpollfds)
                        return false;
                pollfds = newpollfds;
                newfds = realloc(fds, sizeof(*newfds) * num);
                if (!newfds)
                        return false;
                fds = newfds;
                max_fds = num;
        }

        pollfds[num_fds].fd = fd->fd;
        pollfds[num_fds].events = events;
        pollfds[num_fds].revents = 0; /* In case we're iterating now */
        fds[num_fds] = fd;
        fd->backend_info = num_fds;
        num_fds++;
        if (events)
                num_waiting++;

        return true;
}

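/* Unregister a descriptor: move the last entry into its slot (updating
 * that entry's backend_info), free the arrays once they are empty, and
 * close the underlying fd. */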
static void del_fd(struct fd *fd)
{
        size_t n = fd->backend_info;

        assert(n != -1);
        assert(n < num_fds);
        if (pollfds[n].events)
                num_waiting--;
        if (n != num_fds - 1) {
                /* Move last one over us. */
                pollfds[n] = pollfds[num_fds-1];
                fds[n] = fds[num_fds-1];
                assert(fds[n]->backend_info == num_fds-1);
                fds[n]->backend_info = n;
        } else if (num_fds == 1) {
                /* Free everything when no more fds. */
                free(pollfds);
                free(fds);
                pollfds = NULL;
                fds = NULL;
                max_fds = 0;
        }
        num_fds--;
        fd->backend_info = -1;
        close(fd->fd);
}

bool add_listener(struct io_listener *l)
{
        if (!add_fd(&l->fd, POLLIN))
                return false;
        return true;
}

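/* Keep count of connections waiting to advance (IO_NEXT) or to be torn
 * down (IO_FINISHED); io_loop() services both before blocking in poll(). */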
static void adjust_counts(enum io_state state)
{
        if (state == IO_NEXT)
                num_next++;
        else if (state == IO_FINISHED)
                num_finished++;
}

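/* Recompute the poll events for a connection's fd from its current plan,
 * merging in the duplex partner's flags when two connections share the
 * fd, and keep num_waiting in step. */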
static void update_pollevents(struct io_conn *conn)
{
        struct pollfd *pfd = &pollfds[conn->fd.backend_info];

        if (pfd->events)
                num_waiting--;

        pfd->events = conn->plan.pollflag;
        if (conn->duplex) {
                int mask = conn->duplex->plan.pollflag;
                /* You can't *both* read/write. */
                assert(!mask || pfd->events != mask);
                pfd->events |= mask;
        }
        if (pfd->events)
                num_waiting++;

        adjust_counts(conn->plan.state);
}

bool add_conn(struct io_conn *c)
{
        if (!add_fd(&c->fd, c->plan.pollflag))
                return false;
        adjust_counts(c->plan.state);
        return true;
}

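/* A duplex connection reuses its partner's already-registered fd: share
 * the same pollfd slot and merge the poll flags. */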
bool add_duplex(struct io_conn *c)
{
        c->fd.backend_info = c->duplex->fd.backend_info;
        update_pollevents(c);
        return true;
}

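/* Tear down a connection: run its finish callback, drop any pending
 * timeout, then either detach from the duplex partner (which keeps the
 * fd registered) or unregister and close the fd. */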
static void del_conn(struct io_conn *conn)
{
        if (conn->finish)
                conn->finish(conn, conn->finish_arg);
        if (timeout_active(conn))
                backend_del_timeout(conn);
        free(conn->timeout);
        if (conn->duplex) {
                /* In case fds[] pointed to the other one. */
                fds[conn->fd.backend_info] = &conn->duplex->fd;
                conn->duplex->duplex = NULL;
        } else
                del_fd(&conn->fd);
        if (conn->plan.state == IO_FINISHED)
                num_finished--;
        else if (conn->plan.state == IO_NEXT)
                num_next--;
}

void del_listener(struct io_listener *l)
{
        del_fd(&l->fd);
}

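/* Install a new plan on the connection and refresh its poll events. */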
static void backend_set_state(struct io_conn *conn, struct io_plan plan)
{
        conn->plan = plan;
        update_pollevents(conn);
}

void backend_wakeup(struct io_conn *conn)
{
        update_pollevents(conn);
}

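/* Accept a pending connection on a listening fd and hand the new fd to
 * the listener's init callback. */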
static void accept_conn(struct io_listener *l)
{
        int fd = accept(l->fd.fd, NULL, NULL);

        /* FIXME: What to do here? */
        if (fd < 0)
                return;
        l->init(fd, l->arg);
}

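/* Walk fds[]: free connections whose plan reached IO_FINISHED and,
 * unless finished_only, advance those in IO_NEXT by calling their next
 * callback. */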
/* It's OK to miss some, as long as we make progress. */
static void finish_and_next(bool finished_only)
{
        unsigned int i;

        for (i = 0; !io_loop_return && i < num_fds; i++) {
                struct io_conn *c, *duplex;

                if (!num_finished) {
                        if (finished_only || num_next == 0)
                                break;
                }
                if (fds[i]->listener)
                        continue;
                c = (void *)fds[i];
                for (duplex = c->duplex; c; c = duplex, duplex = NULL) {
                        if (c->plan.state == IO_FINISHED) {
                                del_conn(c);
                                free(c);
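                                /* del_conn() may have put a different fd
                                 * in slot i, so step back to revisit it. */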
                                i--;
                        } else if (!finished_only && c->plan.state == IO_NEXT) {
                                backend_set_state(c, c->plan.next(c, c->plan.next_arg));
                                num_next--;
                        }
                }
        }
}

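/* The fd reported activity: run do_ready() for the connection and
 * install the plan it returns. */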
static void ready(struct io_conn *c)
{
        backend_set_state(c, do_ready(c));
}

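/* Arm a timeout for a connection, lazily initializing the timeouts
 * structure on first use. */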
void backend_add_timeout(struct io_conn *conn, struct timespec duration)
{
        if (!timeouts.base)
                timers_init(&timeouts, time_now());
        timer_add(&timeouts, &conn->timeout->timer,
                  time_add(time_now(), duration));
        conn->timeout->conn = conn;
}

void backend_del_timeout(struct io_conn *conn)
{
        assert(conn->timeout->conn == conn);
        timer_del(&timeouts, &conn->timeout->timer);
        conn->timeout->conn = NULL;
}

/* This is the main loop. */
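/* It runs until io_loop_return is set (e.g. via io_break()) or no fds
 * remain, and returns that value. */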
void *io_loop(void)
{
        void *ret;

        while (!io_loop_return) {
                int i, r, timeout = INT_MAX;
                struct timespec now;

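                /* Run any expired timers and work out how long poll()
                 * may block. */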
                if (timeouts.base) {
                        struct timespec first;
                        struct list_head expired;
                        struct io_timeout *t;

                        now = time_now();

                        /* Call functions for expired timers. */
                        timers_expire(&timeouts, now, &expired);
                        while ((t = list_pop(&expired, struct io_timeout, timer.list))) {
                                struct io_conn *conn = t->conn;
                                /* Clear, in case timer re-adds */
                                t->conn = NULL;
                                backend_set_state(conn, t->next(conn, t->next_arg));
                        }

                        /* Now figure out how long to wait for the next one. */
                        if (timer_earliest(&timeouts, &first)) {
                                uint64_t f = time_to_msec(time_sub(first, now));
                                if (f < INT_MAX)
                                        timeout = f;
                        }
                }

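                /* Service connections that finished or asked to move to
                 * their next plan before blocking in poll(). */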
                if (num_finished || num_next) {
                        finish_and_next(false);
                        /* Could have started/finished more. */
                        continue;
                }

                if (num_fds == 0)
                        break;

                /* You can't tell them all to go to sleep! */
                assert(num_waiting);

                r = poll(pollfds, num_fds, timeout);
                if (r < 0)
                        break;

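                /* Dispatch the poll results: accept on listeners, run
                 * ready handlers for readable/writable connections, and
                 * close on hangup. */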
                for (i = 0; i < num_fds && !io_loop_return; i++) {
                        struct io_conn *c = (void *)fds[i];
                        int events = pollfds[i].revents;

                        if (r == 0)
                                break;

                        if (fds[i]->listener) {
                                if (events & POLLIN) {
                                        accept_conn((void *)c);
                                        r--;
                                }
                        } else if (events & (POLLIN|POLLOUT)) {
                                r--;
                                if (c->duplex) {
                                        int mask = c->duplex->plan.pollflag;
                                        if (events & mask) {
                                                ready(c->duplex);
                                                events &= ~mask;
                                                if (!(events&(POLLIN|POLLOUT)))
                                                        continue;
                                        }
                                }
                                ready(c);
                        } else if (events & POLLHUP) {
                                r--;
                                backend_set_state(c, io_close(c, NULL));
                                if (c->duplex)
                                        backend_set_state(c->duplex,
                                                          io_close(c->duplex,
                                                                   NULL));
                        }
                }
        }

        while (num_finished)
                finish_and_next(true);

        ret = io_loop_return;
        io_loop_return = NULL;
        return ret;
}