/* Licensed under LGPLv2.1+ - see LICENSE file for details */
#include <sys/socket.h>
struct io_alloc io_alloc = {
/* Set to skip the next plan. */
/* The current connection to apply the plan to. */
struct io_conn *current;
/* User-defined function to select which connection(s) to debug. */
bool (*io_debug_conn)(struct io_conn *conn);
/* Set when we wake up a connection we are debugging. */
struct io_plan io_debug(struct io_plan plan)
	struct io_conn *ready = NULL;
	if (io_plan_nodebug) {
		io_plan_nodebug = false;
	if (!current || !doing_debug_on(current)) {
	io_debug_wakeup = false;
	backend_plan_changed(current);
	/* Call back into the loop immediately. */
	io_loop_return = do_io_loop(&ready);
	if (!ready->plan.next) {
		/* Call the finish function immediately. */
		errno = ready->plan.u1.s;
		ready->finish(ready, ready->finish_arg);
		backend_del_conn(ready);
	/* Calls back into itself, via io_debug_io(). */
	if (ready->plan.io(ready->fd.fd, &ready->plan) != 2)
	/* Return a do-nothing plan, so backend_plan_changed in
	 * io_ready doesn't do anything (it's already been called). */
int io_debug_io(int ret)
	/* Cache it for debugging; current changes. */
	struct io_conn *conn = current;
	int saved_errno = errno;
	if (!doing_debug_on(conn))
	/* These will all go linearly through the io_debug() path above. */
	/* This will call io_debug() above. */
	case 0: /* Keep going with plan. */
	case 1: /* Done: get next plan. */
		if (timeout_active(conn))
			backend_del_timeout(conn);
		/* In case they call io_duplex, clear our poll flags so
		 * the two sides don't both appear to be doing the same
		 * read or write (see assert(!mask || pfd->events != mask)
		 * in poll.c). */
		conn->plan.pollflag = 0;
		conn->plan = conn->plan.next(conn, conn->plan.next_arg);
	/* Normally-invalid value, used for sanity check. */
static void debug_io_wake(struct io_conn *conn)
	/* We want linear execution if we wake a debugged connection, too. */
	if (io_debug_conn && io_debug_conn(conn))
		io_debug_wakeup = true;
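/* Usage sketch: in DEBUG builds, pointing io_debug_conn at a predicate
 * forces the chosen connection's callbacks to run linearly through
 * io_debug().  conn_under_test and is_my_conn are hypothetical names:
 *
 *	static bool is_my_conn(struct io_conn *conn)
 *	{
 *		return conn == conn_under_test;
 *	}
 *
 *	io_debug_conn = is_my_conn;
 */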
/* Counterpart to io_plan_no_debug(), called in macros in io.h */
static void io_plan_debug_again(void)
	io_plan_nodebug = false;

static void debug_io_wake(struct io_conn *conn)

static void io_plan_debug_again(void)
struct io_listener *io_new_listener_(int fd,
				     void (*init)(int fd, void *arg),
	struct io_listener *l = io_alloc.alloc(sizeof(*l));
	l->fd.listener = true;
	if (!add_listener(l)) {
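/* Usage sketch (assumes the io_new_listener() wrapper from io.h;
 * start_conn() is a hypothetical accept callback):
 *
 *	static void start_conn(int fd, void *arg)
 *	{
 *		// Give each accepted fd its own connection and initial plan.
 *		io_new_conn(fd, io_idle());
 *	}
 *
 *	struct io_listener *l = io_new_listener(listen_fd, start_conn, NULL);
 *	if (!l)
 *		exit(1);
 */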
void io_close_listener(struct io_listener *l)
struct io_conn *io_new_conn_(int fd, struct io_plan plan)
	struct io_conn *conn = io_alloc.alloc(sizeof(*conn));
	io_plan_debug_again();
	conn->fd.listener = false;
	conn->finish_arg = NULL;
	conn->timeout = NULL;
	if (!add_conn(conn)) {
void io_set_finish_(struct io_conn *conn,
		    void (*finish)(struct io_conn *, void *),
	conn->finish = finish;
	conn->finish_arg = arg;
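/* Usage sketch (io_set_finish() wrapper assumed; on_close() is
 * hypothetical).  As io_debug() above shows, errno is set from
 * plan.u1.s before finish is called:
 *
 *	static void on_close(struct io_conn *conn, void *arg)
 *	{
 *		fprintf(stderr, "closed: %s\n", strerror(errno));
 *	}
 *
 *	io_set_finish(conn, on_close, NULL);
 */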
struct io_conn *io_duplex_(struct io_conn *old, struct io_plan plan)
	struct io_conn *conn;
	io_plan_debug_again();
	assert(!old->duplex);
	conn = io_alloc.alloc(sizeof(*conn));
	conn->fd.listener = false;
	conn->fd.fd = old->fd.fd;
	conn->finish_arg = NULL;
	conn->timeout = NULL;
	if (!add_duplex(conn)) {
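/* Usage sketch (io_duplex() wrapper assumed): the new connection shares
 * old's fd, so one side can read while the other writes.  reader, buf,
 * len and write_done are hypothetical:
 *
 *	struct io_conn *writer;
 *	writer = io_duplex(reader, io_write(buf, len, write_done, NULL));
 */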
bool io_timeout_(struct io_conn *conn, struct timespec ts,
		 struct io_plan (*cb)(struct io_conn *, void *), void *arg)
	if (!conn->timeout) {
		conn->timeout = io_alloc.alloc(sizeof(*conn->timeout));
	assert(!timeout_active(conn));
	conn->timeout->next = cb;
	conn->timeout->next_arg = arg;
	backend_add_timeout(conn, ts);
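/* Usage sketch (io_timeout() wrapper assumed; timed_out() hypothetical):
 *
 *	static struct io_plan timed_out(struct io_conn *conn, void *arg)
 *	{
 *		return io_close();
 *	}
 *
 *	// Give up if no plan completes within 30 seconds.
 *	struct timespec ts = { 30, 0 };
 *	io_timeout(conn, ts, timed_out, NULL);
 */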
/* Always done: call the next thing. */
static int do_always(int fd, struct io_plan *plan)

struct io_plan io_always_(struct io_plan (*cb)(struct io_conn *, void *),
	plan.pollflag = POLLALWAYS;
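/* Usage sketch: io_always() (wrapper assumed) yields a plan whose
 * callback fires on the next loop pass without waiting on the fd;
 * next_step is hypothetical:
 *
 *	return io_always(next_step, arg);
 */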
/* Returns true if we're finished. */
static int do_write(int fd, struct io_plan *plan)
	ssize_t ret = write(fd, plan->u1.cp, plan->u2.s);
		return io_debug_io(-1);
	return io_debug_io(plan->u2.s == 0);
/* Queue some data to be written. */
struct io_plan io_write_(const void *data, size_t len,
			 struct io_plan (*cb)(struct io_conn *, void *),
		return io_always_(cb, arg);
	plan.u1.const_vp = data;
	plan.pollflag = POLLOUT;
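/* Usage sketch (io_write() wrapper assumed; names hypothetical).  Only
 * the pointer is stored in plan.u1, so the buffer must stay valid until
 * the callback fires:
 *
 *	static struct io_plan write_done(struct io_conn *conn, void *arg)
 *	{
 *		free(arg);	// the write has fully completed
 *		return io_close();
 *	}
 *
 *	char *msg = strdup("hello\n");
 *	io_new_conn(fd, io_write(msg, strlen(msg), write_done, msg));
 */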
static int do_read(int fd, struct io_plan *plan)
	ssize_t ret = read(fd, plan->u1.cp, plan->u2.s);
		return io_debug_io(-1);
	return io_debug_io(plan->u2.s == 0);
/* Queue a request to read into a buffer. */
struct io_plan io_read_(void *data, size_t len,
			struct io_plan (*cb)(struct io_conn *, void *),
		return io_always_(cb, arg);
	plan.pollflag = POLLIN;
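/* Usage sketch (io_read() wrapper assumed): read exactly sizeof(buf)
 * bytes, then echo them back and close via io_close_cb below:
 *
 *	static char buf[64];
 *
 *	static struct io_plan echo(struct io_conn *conn, void *arg)
 *	{
 *		return io_write(buf, sizeof(buf), io_close_cb, NULL);
 *	}
 *
 *	io_new_conn(fd, io_read(buf, sizeof(buf), echo, NULL));
 */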
static int do_read_partial(int fd, struct io_plan *plan)
	ssize_t ret = read(fd, plan->u1.cp, *(size_t *)plan->u2.vp);
		return io_debug_io(-1);
	*(size_t *)plan->u2.vp = ret;
	return io_debug_io(1);
/* Queue a partial request to read into a buffer. */
struct io_plan io_read_partial_(void *data, size_t *len,
				struct io_plan (*cb)(struct io_conn *, void *),
		return io_always_(cb, arg);
	plan.io = do_read_partial;
	plan.pollflag = POLLIN;
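/* Usage sketch (io_read_partial() wrapper assumed): *len is input (the
 * buffer size) and output (bytes actually read), so it must outlive the
 * plan; got_some() is hypothetical:
 *
 *	static char buf[1024];
 *	static size_t len = sizeof(buf);
 *
 *	io_new_conn(fd, io_read_partial(buf, &len, got_some, NULL));
 */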
static int do_write_partial(int fd, struct io_plan *plan)
	ssize_t ret = write(fd, plan->u1.cp, *(size_t *)plan->u2.vp);
		return io_debug_io(-1);
	*(size_t *)plan->u2.vp = ret;
	return io_debug_io(1);
/* Queue a partial write request. */
struct io_plan io_write_partial_(const void *data, size_t *len,
				 struct io_plan (*cb)(struct io_conn *, void *),
		return io_always_(cb, arg);
	plan.u1.const_vp = data;
	plan.io = do_write_partial;
	plan.pollflag = POLLOUT;
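/* Usage sketch (io_write_partial() wrapper assumed): as with the
 * partial read, *len is rewritten in place with the byte count actually
 * written; reply and wrote_some are hypothetical:
 *
 *	static size_t len;
 *
 *	len = strlen(reply);
 *	io_write_partial(reply, &len, wrote_some, NULL);
 */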
static int already_connected(int fd, struct io_plan *plan)
	return io_debug_io(1);

static int do_connect(int fd, struct io_plan *plan)
	socklen_t len = sizeof(err);
	/* Has the async connect finished? */
	ret = getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
		/* Restore the original file status flags (i.e. blocking
		 * mode, if it was blocking initially).  O_NONBLOCK is a
		 * status flag, so this needs F_SETFL, not F_SETFD. */
		fcntl(fd, F_SETFL, plan->u1.s);
struct io_plan io_connect_(int fd, const struct addrinfo *addr,
			   struct io_plan (*cb)(struct io_conn *, void *),
	/* Save the old file status flags; set nonblocking if not already.
	 * (O_NONBLOCK is a status flag: F_GETFL/F_SETFL, not F_GETFD.) */
	plan.u1.s = fcntl(fd, F_GETFL);
	fcntl(fd, F_SETFL, plan.u1.s | O_NONBLOCK);
	/* An immediate connect can happen. */
	if (connect(fd, addr->ai_addr, addr->ai_addrlen) == 0) {
		/* Dummy plan will be called immediately. */
		plan.pollflag = POLLOUT;
		plan.io = already_connected;
		if (errno != EINPROGRESS)
		/* Completion of a pending connect is signalled by
		 * writability, so wait for POLLOUT, not POLLIN. */
		plan.pollflag = POLLOUT;
		plan.io = do_connect;
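/* Usage sketch (io_connect() wrapper assumed; connected() hypothetical):
 *
 *	struct addrinfo *ai;
 *
 *	if (getaddrinfo("example.com", "80", NULL, &ai) == 0) {
 *		int fd = socket(ai->ai_family, ai->ai_socktype,
 *				ai->ai_protocol);
 *		io_new_conn(fd, io_connect(fd, ai, connected, NULL));
 *	}
 */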
struct io_plan io_idle_(void)
	/* Never called (io_wake() overrides it), but it must be non-NULL:
	 * a NULL next means the connection is closing. */
	plan.next = (void *)io_idle_;

bool io_is_idle(const struct io_conn *conn)
	return conn->plan.io == NULL;
void io_wake_(struct io_conn *conn, struct io_plan plan)
	io_plan_debug_again();
	/* It might be closing, but we haven't called its finish() yet. */
	if (!conn->plan.next)
	/* It was idle, right? */
	assert(!conn->plan.io);
	backend_plan_changed(conn);
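/* Usage sketch: an idle consumer woken by a producer (all names
 * hypothetical; io_idle()/io_wake() wrappers assumed):
 *
 *	// Consumer starts with nothing to do:
 *	struct io_conn *consumer = io_new_conn(fd, io_idle());
 *
 *	// Producer, once data is queued:
 *	if (io_is_idle(consumer))
 *		io_wake(consumer, io_write(data, len, write_done, NULL));
 */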
void io_ready(struct io_conn *conn)
	/* Beware io_close_other! */
	if (!conn->plan.next)
	switch (conn->plan.io(conn->fd.fd, &conn->plan)) {
	case -1: /* Failure means a new plan: close up. */
		conn->plan = io_close();
		backend_plan_changed(conn);
	case 0: /* Keep going with plan. */
	case 1: /* Done: get next plan. */
		if (timeout_active(conn))
			backend_del_timeout(conn);
		/* In case they call io_duplex, clear our poll flags so
		 * the two sides don't both appear to be doing the same
		 * read or write (see assert(!mask || pfd->events != mask)
		 * in poll.c). */
		conn->plan.pollflag = 0;
		conn->plan = conn->plan.next(conn, conn->plan.next_arg);
		backend_plan_changed(conn);
/* Close the connection, we're done. */
struct io_plan io_close_(void)
	/* This means we're closing. */

struct io_plan io_close_cb(struct io_conn *conn, void *arg)
void io_close_other(struct io_conn *conn)
	conn->plan = io_close_();
	backend_plan_changed(conn);
/* Exit the loop, returning this (non-NULL) arg. */
struct io_plan io_break_(void *ret, struct io_plan plan)
	io_plan_debug_again();
	io_loop_return = ret;
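/* Usage sketch: io_break() (wrapper assumed) makes io_loop() return ret
 * while this connection carries on with the given plan:
 *
 *	static struct io_plan quit(struct io_conn *conn, void *arg)
 *	{
 *		// io_loop() returns arg; this conn just idles meanwhile.
 *		return io_break(arg, io_idle());
 *	}
 */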
static struct io_plan io_never_called(struct io_conn *conn, void *arg)

struct io_plan io_never(void)
	return io_always_(io_never_called, NULL);
int io_conn_fd(const struct io_conn *conn)
void io_set_alloc(void *(*allocfn)(size_t size),
		  void *(*reallocfn)(void *ptr, size_t size),
		  void (*freefn)(void *ptr))
	io_alloc.alloc = allocfn;
	io_alloc.realloc = reallocfn;
	io_alloc.free = freefn;
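/* Usage sketch: route all of io's allocations through counting wrappers
 * (count_alloc/count_realloc/count_free are hypothetical functions with
 * malloc/realloc/free signatures):
 *
 *	io_set_alloc(count_alloc, count_realloc, count_free);
 */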