/* Licensed under LGPLv2.1+ - see LICENSE file for details */
#include "io.h"
#include "backend.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <errno.h>
#include <stdlib.h>
#include <assert.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>

void *io_loop_return;

struct io_alloc io_alloc = {
	malloc, realloc, free
};

#ifdef DEBUG
/* Set to skip the next plan. */
bool io_plan_nodebug;
/* The current connection to apply plan to. */
struct io_conn *current;
/* User-defined function to select which connection(s) to debug. */
bool (*io_debug_conn)(struct io_conn *conn);

struct io_plan io_debug(struct io_plan plan)
{
	struct io_conn *ready = NULL;

	if (io_plan_nodebug) {
		io_plan_nodebug = false;
		return plan;
	}

	if (!current || !doing_debug_on(current))
		return plan;

	current->plan = plan;
	backend_plan_changed(current);

	/* Call back into the loop immediately. */
	io_loop_return = do_io_loop(&ready);

	if (ready) {
		set_current(ready);
		if (!ready->plan.next) {
			/* Call finish function immediately. */
			if (ready->finish) {
				errno = ready->plan.u1.s;
				ready->finish(ready, ready->finish_arg);
				ready->finish = NULL;
			}
			backend_del_conn(ready);
		} else {
			/* Calls back in itself, via io_debug_io(). */
			if (ready->plan.io(ready->fd.fd, &ready->plan) != 2)
				abort();
		}
		set_current(NULL);
	}

	/* Return a do-nothing plan, so backend_plan_changed in
	 * io_ready doesn't do anything (it's already been called). */
	return io_wait_(NULL, (void *)1, NULL);
}

int io_debug_io(int ret)
{
	/* Cache it for debugging; current changes. */
	struct io_conn *conn = current;
	int saved_errno = errno;

	if (!doing_debug_on(conn))
		return ret;

	/* These will all go linearly through the io_debug() path above. */
	switch (ret) {
	case -1:
		/* This will call io_debug above. */
		errno = saved_errno;
		io_close();
		break;
	case 0: /* Keep going with plan. */
		io_debug(conn->plan);
		break;
	case 1: /* Done: get next plan. */
		if (timeout_active(conn))
			backend_del_timeout(conn);
		/* In case they call io_duplex, clear our poll flags so
		 * both sides don't seem to be both doing read or write
		 * (See assert(!mask || pfd->events != mask) in poll.c) */
		conn->plan.pollflag = 0;
		conn->plan.next(conn, conn->plan.next_arg);
		break;
	default:
		abort();
	}

	/* Normally-invalid value, used for sanity check. */
	return 2;
}

/* Counterpart to io_plan_no_debug(), called in macros in io.h */
static void io_plan_debug_again(void)
{
	io_plan_nodebug = false;
}
#else
static void io_plan_debug_again(void)
{
}
#endif

struct io_listener *io_new_listener_(int fd,
				     void (*init)(int fd, void *arg),
				     void *arg)
{
	struct io_listener *l = io_alloc.alloc(sizeof(*l));

	if (!l)
		return NULL;

	l->fd.listener = true;
	l->fd.fd = fd;
	l->init = init;
	l->arg = arg;
	if (!add_listener(l)) {
		io_alloc.free(l);
		return NULL;
	}
	return l;
}

void io_close_listener(struct io_listener *l)
{
	close(l->fd.fd);
	del_listener(l);
	io_alloc.free(l);
}

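/* Example (a sketch, not from this file, using the io.h wrapper
 * macros; accept_conn, read_done and buf are hypothetical).  The
 * listener's init function receives each accepted fd, which it
 * typically hands straight to io_new_conn():
 *
 *	static void accept_conn(int fd, void *arg)
 *	{
 *		io_new_conn(fd, io_read(buf, sizeof(buf), read_done, NULL));
 *	}
 *	...
 *	io_new_listener(bound_fd, accept_conn, NULL);
 */
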
struct io_conn *io_new_conn_(int fd, struct io_plan plan)
{
	struct io_conn *conn = io_alloc.alloc(sizeof(*conn));

	io_plan_debug_again();

	if (!conn)
		return NULL;

	conn->fd.listener = false;
	conn->fd.fd = fd;
	conn->plan = plan;
	conn->finish = NULL;
	conn->finish_arg = NULL;
	conn->duplex = NULL;
	conn->timeout = NULL;
	if (!add_conn(conn)) {
		io_alloc.free(conn);
		return NULL;
	}
	return conn;
}

void io_set_finish_(struct io_conn *conn,
		    void (*finish)(struct io_conn *, void *),
		    void *arg)
{
	conn->finish = finish;
	conn->finish_arg = arg;
}

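/* Example (a sketch; log_close is hypothetical).  The finish function
 * runs as the connection is freed, with errno set to the reason it
 * closed (see io_close_() below, which stashes errno in the plan):
 *
 *	static void log_close(struct io_conn *conn, void *arg)
 *	{
 *		fprintf(stderr, "closed: %s\n", strerror(errno));
 *	}
 *	...
 *	io_set_finish(conn, log_close, NULL);
 */
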
struct io_conn *io_duplex_(struct io_conn *old, struct io_plan plan)
{
	struct io_conn *conn;

	io_plan_debug_again();

	assert(!old->duplex);

	conn = io_alloc.alloc(sizeof(*conn));
	if (!conn)
		return NULL;

	conn->fd.listener = false;
	conn->fd.fd = old->fd.fd;
	conn->plan = plan;
	conn->duplex = old;
	conn->finish = NULL;
	conn->finish_arg = NULL;
	conn->timeout = NULL;
	if (!add_duplex(conn)) {
		io_alloc.free(conn);
		return NULL;
	}
	old->duplex = conn;
	return conn;
}

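/* Example (a sketch; reader, out and write_done are hypothetical).
 * io_duplex() pairs a second connection with an existing one on the
 * same fd, so reads and writes can proceed independently:
 *
 *	struct io_conn *writer;
 *	writer = io_duplex(reader, io_write(out, outlen, write_done, NULL));
 */
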
bool io_timeout_(struct io_conn *conn, struct timerel t,
		 struct io_plan (*cb)(struct io_conn *, void *), void *arg)
{
	assert(cb);

	if (!conn->timeout) {
		conn->timeout = io_alloc.alloc(sizeof(*conn->timeout));
		if (!conn->timeout)
			return false;
	} else
		assert(!timeout_active(conn));

	conn->timeout->next = cb;
	conn->timeout->next_arg = arg;
	backend_add_timeout(conn, t);
	return true;
}

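/* Example (a sketch; give_up is hypothetical, and this assumes
 * ccan/time's struct timerel wraps a struct timespec).  Close the
 * connection if its current plan hasn't completed within 5 seconds:
 *
 *	static struct io_plan give_up(struct io_conn *conn, void *arg)
 *	{
 *		return io_close();
 *	}
 *	...
 *	struct timerel t = { { 5, 0 } };
 *	io_timeout(conn, t, give_up, NULL);
 */
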
/* Always done: call the next thing. */
static int do_always(int fd, struct io_plan *plan)
{
	return io_debug_io(1);
}

struct io_plan io_always_(struct io_plan (*cb)(struct io_conn *, void *),
			  void *arg)
{
	struct io_plan plan;

	assert(cb);
	plan.io = do_always;
	plan.next = cb;
	plan.next_arg = arg;
	plan.pollflag = POLLALWAYS;

	return plan;
}

/* Returns true if we're finished. */
static int do_write(int fd, struct io_plan *plan)
{
	ssize_t ret = write(fd, plan->u1.cp, plan->u2.s);
	if (ret < 0)
		return io_debug_io(-1);

	plan->u1.cp += ret;
	plan->u2.s -= ret;
	return io_debug_io(plan->u2.s == 0);
}

/* Queue some data to be written. */
struct io_plan io_write_(const void *data, size_t len,
			 struct io_plan (*cb)(struct io_conn *, void *),
			 void *arg)
{
	struct io_plan plan;

	assert(cb);

	if (len == 0)
		return io_always_(cb, arg);

	plan.u1.const_vp = data;
	plan.u2.s = len;
	plan.io = do_write;
	plan.next = cb;
	plan.next_arg = arg;
	plan.pollflag = POLLOUT;

	return plan;
}

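/* Example (a sketch; hello_done is hypothetical): queue a greeting and
 * close once every byte has been written:
 *
 *	static struct io_plan hello_done(struct io_conn *conn, void *arg)
 *	{
 *		return io_close();
 *	}
 *	...
 *	io_new_conn(fd, io_write("hello", 5, hello_done, NULL));
 */
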
static int do_read(int fd, struct io_plan *plan)
{
	ssize_t ret = read(fd, plan->u1.cp, plan->u2.s);
	if (ret <= 0)
		return io_debug_io(-1);

	plan->u1.cp += ret;
	plan->u2.s -= ret;
	return io_debug_io(plan->u2.s == 0);
}

/* Queue a request to read into a buffer. */
struct io_plan io_read_(void *data, size_t len,
			struct io_plan (*cb)(struct io_conn *, void *),
			void *arg)
{
	struct io_plan plan;

	assert(cb);

	if (len == 0)
		return io_always_(cb, arg);

	plan.u1.cp = data;
	plan.u2.s = len;
	plan.io = do_read;
	plan.next = cb;
	plan.next_arg = arg;
	plan.pollflag = POLLIN;

	return plan;
}

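/* Example (a sketch; buf and echo are hypothetical): read a fixed-size
 * block, then write the same bytes back and close, reusing io_close_cb
 * from below:
 *
 *	static char buf[64];
 *
 *	static struct io_plan echo(struct io_conn *conn, void *arg)
 *	{
 *		return io_write(buf, sizeof(buf), io_close_cb, NULL);
 *	}
 *	...
 *	io_new_conn(fd, io_read(buf, sizeof(buf), echo, NULL));
 */
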
static int do_read_partial(int fd, struct io_plan *plan)
{
	ssize_t ret = read(fd, plan->u1.cp, *(size_t *)plan->u2.vp);
	if (ret <= 0)
		return io_debug_io(-1);

	*(size_t *)plan->u2.vp = ret;
	return io_debug_io(1);
}

/* Queue a partial request to read into a buffer. */
struct io_plan io_read_partial_(void *data, size_t *len,
				struct io_plan (*cb)(struct io_conn *, void *),
				void *arg)
{
	struct io_plan plan;

	assert(cb);

	if (*len == 0)
		return io_always_(cb, arg);

	plan.u1.cp = data;
	plan.u2.vp = len;
	plan.io = do_read_partial;
	plan.next = cb;
	plan.next_arg = arg;
	plan.pollflag = POLLIN;

	return plan;
}

static int do_write_partial(int fd, struct io_plan *plan)
{
	ssize_t ret = write(fd, plan->u1.cp, *(size_t *)plan->u2.vp);
	if (ret < 0)
		return io_debug_io(-1);

	*(size_t *)plan->u2.vp = ret;
	return io_debug_io(1);
}

/* Queue a partial write request. */
struct io_plan io_write_partial_(const void *data, size_t *len,
				 struct io_plan (*cb)(struct io_conn*, void *),
				 void *arg)
{
	struct io_plan plan;

	assert(cb);

	if (*len == 0)
		return io_always_(cb, arg);

	plan.u1.const_vp = data;
	plan.u2.vp = len;
	plan.io = do_write_partial;
	plan.next = cb;
	plan.next_arg = arg;
	plan.pollflag = POLLOUT;

	return plan;
}

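/* Example (a sketch; struct state and wrote_some are hypothetical).
 * On completion *len holds the bytes actually written, so the callback
 * can advance its offset and re-arm until everything is sent:
 *
 *	static struct io_plan wrote_some(struct io_conn *conn, void *p)
 *	{
 *		struct state *s = p;
 *		s->off += s->len;
 *		if (s->off == s->total)
 *			return io_close();
 *		s->len = s->total - s->off;
 *		return io_write_partial(s->buf + s->off, &s->len,
 *					wrote_some, s);
 *	}
 */
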
static int already_connected(int fd, struct io_plan *plan)
{
	return io_debug_io(1);
}

static int do_connect(int fd, struct io_plan *plan)
{
	int err, ret;
	socklen_t len = sizeof(err);

	/* Has async connect finished? */
	ret = getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
	if (ret < 0)
		return io_debug_io(-1);

	if (err == 0) {
		/* Restore blocking if it was initially (F_SETFL: these
		 * are file status flags, saved with F_GETFL below). */
		fcntl(fd, F_SETFL, plan->u1.s);
		return io_debug_io(1);
	}
	return io_debug_io(0);
}

struct io_plan io_connect_(int fd, const struct addrinfo *addr,
			   struct io_plan (*cb)(struct io_conn*, void *),
			   void *arg)
{
	struct io_plan plan;

	assert(cb);

	plan.next = cb;
	plan.next_arg = arg;

	/* Save old flags, set nonblock if not already. */
	plan.u1.s = fcntl(fd, F_GETFL);
	fcntl(fd, F_SETFL, plan.u1.s | O_NONBLOCK);

	/* Immediate connect can happen. */
	if (connect(fd, addr->ai_addr, addr->ai_addrlen) == 0) {
		/* Dummy will be called immediately. */
		plan.pollflag = POLLOUT;
		plan.io = already_connected;
	} else {
		if (errno != EINPROGRESS)
			return io_close_();

		/* Completion of a nonblocking connect is signalled by
		 * the socket becoming writable, so poll for POLLOUT. */
		plan.pollflag = POLLOUT;
		plan.io = do_connect;
	}
	return plan;
}

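/* Example (a sketch; connected is hypothetical, getaddrinfo is plain
 * POSIX).  The connect plan completes once the socket is usable:
 *
 *	struct addrinfo *addr;
 *	int fd;
 *
 *	getaddrinfo("example.com", "80", NULL, &addr);
 *	fd = socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol);
 *	io_new_conn(fd, io_connect(fd, addr, connected, NULL));
 */
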
struct io_plan io_wait_(const void *wait,
			struct io_plan (*cb)(struct io_conn *, void*),
			void *arg)
{
	struct io_plan plan;

	assert(cb);
	plan.pollflag = 0;
	plan.io = NULL;
	plan.next = cb;
	plan.next_arg = arg;

	plan.u1.const_vp = wait;

	return plan;
}

void io_wake(const void *wait)
{
	backend_wait_changed(wait);
}

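/* Example (a sketch; queue and consume are hypothetical).  A consumer
 * sleeps on an arbitrary address with io_wait(); the producer passes
 * the same address to io_wake() once there is work to do:
 *
 *	return io_wait(&queue, consume, &queue);   <- consumer's plan
 *	...
 *	io_wake(&queue);                           <- producer, after queueing
 */
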
void io_ready(struct io_conn *conn)
{
	/* Beware io_close_other! */
	if (!conn->plan.next)
		return;

	set_current(conn);
	switch (conn->plan.io(conn->fd.fd, &conn->plan)) {
	case -1: /* Failure means a new plan: close up. */
		conn->plan = io_close();
		backend_plan_changed(conn);
		break;
	case 0: /* Keep going with plan. */
		break;
	case 1: /* Done: get next plan. */
		if (timeout_active(conn))
			backend_del_timeout(conn);
		/* In case they call io_duplex, clear our poll flags so
		 * both sides don't seem to be both doing read or write
		 * (See assert(!mask || pfd->events != mask) in poll.c) */
		conn->plan.pollflag = 0;
		conn->plan = conn->plan.next(conn, conn->plan.next_arg);
		backend_plan_changed(conn);
	}
	set_current(NULL);
}

/* Close the connection, we're done. */
struct io_plan io_close_(void)
{
	struct io_plan plan;

	plan.pollflag = 0;
	/* This means we're closing. */
	plan.next = NULL;
	plan.u1.s = errno;

	return plan;
}

struct io_plan io_close_cb(struct io_conn *conn, void *arg)
{
	return io_close();
}

void io_close_other(struct io_conn *conn)
{
	/* Don't close if already closing! */
	if (conn->plan.next) {
		conn->plan = io_close_();
		backend_plan_changed(conn);
	}
}

/* Exit the loop, returning this (non-NULL) arg. */
struct io_plan io_break_(void *ret, struct io_plan plan)
{
	io_plan_debug_again();

	assert(ret);
	io_loop_return = ret;

	return plan;
}

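/* Example (a sketch; got_answer and buf are hypothetical): make
 * io_loop() return a result, leaving this connection parked on
 * another read:
 *
 *	static struct io_plan got_answer(struct io_conn *conn, void *ans)
 *	{
 *		return io_break(ans, io_read(buf, sizeof(buf),
 *					     got_answer, ans));
 *	}
 *	...
 *	answer = io_loop();
 */
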
static struct io_plan io_never_called(struct io_conn *conn, void *arg)
{
	abort();
}

struct io_plan io_never(void)
{
	return io_always_(io_never_called, NULL);
}

int io_conn_fd(const struct io_conn *conn)
{
	return conn->fd.fd;
}

void io_set_alloc(void *(*allocfn)(size_t size),
		  void *(*reallocfn)(void *ptr, size_t size),
		  void (*freefn)(void *ptr))
{
	io_alloc.alloc = allocfn;
	io_alloc.realloc = reallocfn;
	io_alloc.free = freefn;
}

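/* Example (a sketch; the counting wrapper is hypothetical): route all
 * of the library's allocations through instrumented functions, e.g.
 * to track leaks in tests:
 *
 *	static size_t nallocs;
 *
 *	static void *count_alloc(size_t size)
 *	{
 *		nallocs++;
 *		return malloc(size);
 *	}
 *	...
 *	io_set_alloc(count_alloc, realloc, free);
 */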