/* Licensed under LGPLv2.1+ - see LICENSE file for details */
#include "io.h"
#include "backend.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <errno.h>
#include <stdlib.h>
#include <assert.h>
#include <poll.h>
#include <unistd.h>
#include <fcntl.h>
#include <ccan/container_of/container_of.h>

void *io_loop_return;

struct io_plan io_conn_freed;

struct io_listener *io_new_listener_(const tal_t *ctx, int fd,
				     struct io_plan *(*init)(struct io_conn *,
							     void *),
				     void *arg)
{
	struct io_listener *l = tal(ctx, struct io_listener);
	if (!l)
		return NULL;

	l->fd.listener = true;
	l->fd.fd = fd;
	l->init = init;
	l->arg = arg;
	l->ctx = ctx;
	if (!add_listener(l))
		return tal_free(l);
	return l;
}

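/*
 * A minimal usage sketch (caller-side code, not part of this file): accept
 * connections on an already-bound, listening socket and start each one
 * reading.  `struct daemon`, `d->buf` and `listen_fd` are hypothetical.
 *
 *	static struct io_plan *init_conn(struct io_conn *conn, struct daemon *d)
 *	{
 *		return io_read(conn, d->buf, sizeof(d->buf), io_close_cb, NULL);
 *	}
 *
 *	l = io_new_listener(NULL, listen_fd, init_conn, d);
 */
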
void io_close_listener(struct io_listener *l)
{
	tal_free(l);
}

static struct io_plan *io_never_called(struct io_conn *conn, void *arg)
{
	abort();
}

/* Returns false if conn was freed. */
static bool next_plan(struct io_conn *conn, struct io_plan *plan)
{
	struct io_plan *(*next)(struct io_conn *, void *arg);

	next = plan->next;

	plan->status = IO_UNSET;
	plan->io = NULL;
	plan->next = io_never_called;

	plan = next(conn, plan->next_arg);

	if (plan == &io_conn_freed)
		return false;

	/* It should have set a plan inside this conn (or duplex) */
	assert(plan == &conn->plan[IO_IN]
	       || plan == &conn->plan[IO_OUT]
	       || plan == &conn->plan[2]);
	assert(conn->plan[IO_IN].status != IO_UNSET
	       || conn->plan[IO_OUT].status != IO_UNSET);

	backend_new_plan(conn);
	return true;
}

bool io_fd_block(int fd, bool block)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags == -1)
		return false;

	if (block)
		flags &= ~O_NONBLOCK;
	else
		flags |= O_NONBLOCK;

	return fcntl(fd, F_SETFL, flags) != -1;
}

struct io_conn *io_new_conn_(const tal_t *ctx, int fd,
			     struct io_plan *(*init)(struct io_conn *, void *),
			     void *arg)
{
	struct io_conn *conn = tal(ctx, struct io_conn);

	if (!conn)
		return NULL;

	conn->fd.listener = false;
	conn->fd.fd = fd;
	conn->finish = NULL;
	conn->finish_arg = NULL;
	list_node_init(&conn->always);

	if (!add_conn(conn))
		return tal_free(conn);

	/* Keep our I/O async. */
	io_fd_block(fd, false);

	/* We start with out doing nothing, and in doing our init. */
	conn->plan[IO_OUT].status = IO_UNSET;

	conn->plan[IO_IN].next = init;
	conn->plan[IO_IN].next_arg = arg;
	if (!next_plan(conn, &conn->plan[IO_IN]))
		return NULL;

	return conn;
}

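/*
 * A minimal usage sketch (caller-side code, not part of this file): wrap an
 * already-connected socket in a conn whose first plan reads a header.
 * `struct peer` and its fields are hypothetical.
 *
 *	static struct io_plan *got_header(struct io_conn *conn, struct peer *p)
 *	{
 *		return io_close(conn);
 *	}
 *
 *	static struct io_plan *init_peer(struct io_conn *conn, struct peer *p)
 *	{
 *		return io_read(conn, &p->hdr, sizeof(p->hdr), got_header, p);
 *	}
 *
 *	conn = io_new_conn(NULL, fd, init_peer, p);
 */
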
void io_set_finish_(struct io_conn *conn,
		    void (*finish)(struct io_conn *, void *),
		    void *arg)
{
	conn->finish = finish;
	conn->finish_arg = arg;
}

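/*
 * A usage sketch (caller-side, hypothetical names): the finish callback runs
 * when the conn is freed, with errno left indicating why it was closed.
 *
 *	static void peer_gone(struct io_conn *conn, struct peer *p)
 *	{
 *		fprintf(stderr, "lost peer: %s\n", strerror(errno));
 *	}
 *
 *	io_set_finish(conn, peer_gone, p);
 */
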
struct io_plan_arg *io_plan_arg(struct io_conn *conn, enum io_direction dir)
{
	assert(conn->plan[dir].status == IO_UNSET);

	conn->plan[dir].status = IO_POLLING;
	return &conn->plan[dir].arg;
}

static struct io_plan *set_always(struct io_conn *conn,
				  enum io_direction dir,
				  struct io_plan *(*next)(struct io_conn *,
							  void *),
				  void *arg)
{
	struct io_plan *plan = &conn->plan[dir];

	plan->status = IO_ALWAYS;
	backend_new_always(conn);
	return io_set_plan(conn, dir, NULL, next, arg);
}

static struct io_plan *io_always_dir(struct io_conn *conn,
				     enum io_direction dir,
				     struct io_plan *(*next)(struct io_conn *,
							     void *),
				     void *arg)
{
	return set_always(conn, dir, next, arg);
}

struct io_plan *io_always_(struct io_conn *conn,
			   struct io_plan *(*next)(struct io_conn *, void *),
			   void *arg)
{
	return io_always_dir(conn, IO_IN, next, arg);
}

struct io_plan *io_out_always_(struct io_conn *conn,
			       struct io_plan *(*next)(struct io_conn *,
						       void *),
			       void *arg)
{
	return io_always_dir(conn, IO_OUT, next, arg);
}

static int do_write(int fd, struct io_plan_arg *arg)
{
	ssize_t ret = write(fd, arg->u1.cp, arg->u2.s);
	if (ret < 0)
		return -1;

	arg->u1.cp += ret;
	arg->u2.s -= ret;
	return arg->u2.s == 0;
}

/* Queue some data to be written. */
struct io_plan *io_write_(struct io_conn *conn, const void *data, size_t len,
			  struct io_plan *(*next)(struct io_conn *, void *),
			  void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, IO_OUT);

	if (len == 0)
		return set_always(conn, IO_OUT, next, next_arg);

	arg->u1.const_vp = data;
	arg->u2.s = len;

	return io_set_plan(conn, IO_OUT, do_write, next, next_arg);
}

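/*
 * A usage sketch (caller-side, hypothetical names): queue a reply, then go
 * back to reading.  The buffer must stay valid until the next callback runs.
 *
 *	static struct io_plan *sent_reply(struct io_conn *conn, struct client *c)
 *	{
 *		return io_read(conn, c->req, sizeof(c->req), handle_req, c);
 *	}
 *
 *	return io_write(conn, c->reply, c->reply_len, sent_reply, c);
 */
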
static int do_read(int fd, struct io_plan_arg *arg)
{
	ssize_t ret = read(fd, arg->u1.cp, arg->u2.s);
	if (ret <= 0)
		return -1;

	arg->u1.cp += ret;
	arg->u2.s -= ret;
	return arg->u2.s == 0;
}

/* Queue a request to read into a buffer. */
struct io_plan *io_read_(struct io_conn *conn,
			 void *data, size_t len,
			 struct io_plan *(*next)(struct io_conn *, void *),
			 void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, IO_IN);

	if (len == 0)
		return set_always(conn, IO_IN, next, next_arg);

	arg->u1.cp = data;
	arg->u2.s = len;

	return io_set_plan(conn, IO_IN, do_read, next, next_arg);
}

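/*
 * A usage sketch (caller-side, hypothetical names): io_read() only completes
 * once all `len` bytes have arrived, so a length-prefixed message is two
 * chained plans.
 *
 *	static struct io_plan *read_body(struct io_conn *conn, struct msg *m)
 *	{
 *		m->body = tal_arr(m, char, m->len);
 *		return io_read(conn, m->body, m->len, handle_msg, m);
 *	}
 *
 *	return io_read(conn, &m->len, sizeof(m->len), read_body, m);
 */
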
static int do_read_partial(int fd, struct io_plan_arg *arg)
{
	ssize_t ret = read(fd, arg->u1.cp, *(size_t *)arg->u2.vp);
	if (ret <= 0)
		return -1;

	*(size_t *)arg->u2.vp = ret;
	return 1;
}

/* Queue a partial request to read into a buffer. */
struct io_plan *io_read_partial_(struct io_conn *conn,
				 void *data, size_t maxlen, size_t *len,
				 struct io_plan *(*next)(struct io_conn *,
							 void *),
				 void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, IO_IN);

	if (maxlen == 0)
		return set_always(conn, IO_IN, next, next_arg);

	arg->u1.cp = data;
	/* We store the max len in here temporarily. */
	*len = maxlen;
	arg->u2.vp = len;

	return io_set_plan(conn, IO_IN, do_read_partial, next, next_arg);
}

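/*
 * A usage sketch (caller-side, hypothetical names): take whatever bytes are
 * available; when the callback runs, *len has been updated to the number of
 * bytes actually read.
 *
 *	static struct io_plan *got_some(struct io_conn *conn, struct client *c)
 *	{
 *		consume(c->inbuf, c->inlen);
 *		return io_read_partial(conn, c->inbuf, sizeof(c->inbuf),
 *				       &c->inlen, got_some, c);
 *	}
 */
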
static int do_write_partial(int fd, struct io_plan_arg *arg)
{
	ssize_t ret = write(fd, arg->u1.cp, *(size_t *)arg->u2.vp);
	if (ret < 0)
		return -1;

	*(size_t *)arg->u2.vp = ret;
	return 1;
}

/* Queue a partial write request. */
struct io_plan *io_write_partial_(struct io_conn *conn,
				  const void *data, size_t maxlen, size_t *len,
				  struct io_plan *(*next)(struct io_conn *,
							  void *),
				  void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, IO_OUT);

	if (maxlen == 0)
		return set_always(conn, IO_OUT, next, next_arg);

	arg->u1.const_vp = data;
	/* We store the max len in here temporarily. */
	*len = maxlen;
	arg->u2.vp = len;

	return io_set_plan(conn, IO_OUT, do_write_partial, next, next_arg);
}

static int do_connect(int fd, struct io_plan_arg *arg)
{
	int err, ret;
	socklen_t len = sizeof(err);

	/* Has async connect finished? */
	ret = getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
	if (ret < 0)
		return -1;

	if (err == 0) {
		return 1;
	} else if (err == EINPROGRESS)
		return 0;

	errno = err;
	return -1;
}

struct io_plan *io_connect_(struct io_conn *conn, const struct addrinfo *addr,
			    struct io_plan *(*next)(struct io_conn *, void *),
			    void *next_arg)
{
	int fd = io_conn_fd(conn);

	/* We don't actually need the arg, but we need it polling. */
	io_plan_arg(conn, IO_OUT);

	/* Note that io_new_conn() will make fd O_NONBLOCK */

	/* Immediate connect can happen. */
	if (connect(fd, addr->ai_addr, addr->ai_addrlen) == 0)
		return set_always(conn, IO_OUT, next, next_arg);

	if (errno != EINPROGRESS)
		return io_close(conn);

	return io_set_plan(conn, IO_OUT, do_connect, next, next_arg);
}

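/*
 * A usage sketch (caller-side, hypothetical names, error checks omitted):
 * resolve an address, create a socket, and let the loop finish the
 * non-blocking connect.
 *
 *	static struct io_plan *init_connect(struct io_conn *conn,
 *					    struct addrinfo *ai)
 *	{
 *		return io_connect(conn, ai, connected, NULL);
 *	}
 *
 *	getaddrinfo("example.com", "80", NULL, &ai);
 *	fd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
 *	conn = io_new_conn(NULL, fd, init_connect, ai);
 */
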
static struct io_plan *io_wait_dir(struct io_conn *conn,
				   const void *wait,
				   enum io_direction dir,
				   struct io_plan *(*next)(struct io_conn *,
							   void *),
				   void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, dir);
	arg->u1.const_vp = wait;

	conn->plan[dir].status = IO_WAITING;

	return io_set_plan(conn, dir, NULL, next, next_arg);
}

struct io_plan *io_wait_(struct io_conn *conn,
			 const void *wait,
			 struct io_plan *(*next)(struct io_conn *, void *),
			 void *next_arg)
{
	return io_wait_dir(conn, wait, IO_IN, next, next_arg);
}

struct io_plan *io_out_wait_(struct io_conn *conn,
			     const void *wait,
			     struct io_plan *(*next)(struct io_conn *, void *),
			     void *next_arg)
{
	return io_wait_dir(conn, wait, IO_OUT, next, next_arg);
}

void io_wake(const void *wait)
{
	backend_wake(wait);
}

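/*
 * A usage sketch (caller-side, hypothetical names): a writer parks itself on
 * an arbitrary address until some other code wakes that same address.
 *
 *	(writer, nothing to send yet: sleep on the queue address)
 *	return io_out_wait(conn, &peer->out_queue, write_next, peer);
 *
 *	(producer, after queueing a message: wake any conn waiting there)
 *	io_wake(&peer->out_queue);
 */
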
/* Returns false if this has been freed. */
static bool do_plan(struct io_conn *conn, struct io_plan *plan)
{
	/* We shouldn't have polled for this event if this wasn't true! */
	assert(plan->status == IO_POLLING);

	switch (plan->io(conn->fd.fd, &plan->arg)) {
	case -1:
		io_close(conn);
		return false;
	case 0:
		return true;
	case 1:
		return next_plan(conn, plan);
	default:
		/* IO should only return -1, 0 or 1 */
		abort();
	}
}

void io_ready(struct io_conn *conn, int pollflags)
{
	if (pollflags & POLLIN)
		if (!do_plan(conn, &conn->plan[IO_IN]))
			return;

	if (pollflags & POLLOUT)
		do_plan(conn, &conn->plan[IO_OUT]);
}

void io_do_always(struct io_conn *conn)
{
	/* There's a corner case where next_plan() for the in direction wakes
	 * up the out direction, placing it in IO_ALWAYS; we then end up
	 * processing it immediately, only to leave it in the always list.
	 *
	 * Yet we can't just process one, in case they are both supposed
	 * to be done, so grab state beforehand.
	 */
	bool always_out = (conn->plan[IO_OUT].status == IO_ALWAYS);

	if (conn->plan[IO_IN].status == IO_ALWAYS)
		if (!next_plan(conn, &conn->plan[IO_IN]))
			return;

	if (always_out) {
		/* You can't *unalways* a conn (except by freeing, in which
		 * case next_plan() returned false). */
		assert(conn->plan[IO_OUT].status == IO_ALWAYS);
		next_plan(conn, &conn->plan[IO_OUT]);
	}
}

void io_do_wakeup(struct io_conn *conn, enum io_direction dir)
{
	struct io_plan *plan = &conn->plan[dir];

	assert(plan->status == IO_WAITING);

	set_always(conn, dir, plan->next, plan->next_arg);
}

/* Close the connection, we're done. */
struct io_plan *io_close(struct io_conn *conn)
{
	tal_free(conn);
	return &io_conn_freed;
}

struct io_plan *io_close_cb(struct io_conn *conn, void *next_arg)
{
	return io_close(conn);
}

struct io_plan *io_close_taken_fd(struct io_conn *conn)
{
	io_fd_block(conn->fd.fd, true);

	cleanup_conn_without_close(conn);
	return io_close(conn);
}

/* Exit the loop, returning this (non-NULL) arg. */
void io_break(const void *ret)
{
	assert(ret);
	io_loop_return = (void *)ret;
}

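/*
 * A usage sketch (caller-side, hypothetical names): io_loop() runs until some
 * callback calls io_break(); the argument becomes io_loop()'s return value,
 * which is why it must not be NULL.
 *
 *	static struct io_plan *done(struct io_conn *conn, struct reply *r)
 *	{
 *		io_break(r);
 *		return io_close(conn);
 *	}
 *
 *	r = io_loop(NULL, NULL);
 */
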
struct io_plan *io_never(struct io_conn *conn, void *unused)
{
	return io_always(conn, io_never_called, NULL);
}

int io_conn_fd(const struct io_conn *conn)
{
	return conn->fd.fd;
}

struct io_plan *io_duplex(struct io_conn *conn,
			  struct io_plan *in_plan, struct io_plan *out_plan)
{
	assert(conn == container_of(in_plan, struct io_conn, plan[IO_IN]));
	/* in_plan must be conn->plan[IO_IN], out_plan must be [IO_OUT] */
	assert(out_plan == in_plan + 1);
	return out_plan + 1;
}

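/*
 * A usage sketch (caller-side, hypothetical names): run independent read and
 * write plans on the same conn by handing both to io_duplex().
 *
 *	static struct io_plan *echo_init(struct io_conn *conn, struct echo *e)
 *	{
 *		return io_duplex(conn,
 *				 io_read_partial(conn, e->in, sizeof(e->in),
 *						 &e->inlen, got_in, e),
 *				 io_write(conn, e->banner, e->banner_len,
 *					  sent_banner, e));
 *	}
 */
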
struct io_plan *io_halfclose(struct io_conn *conn)
{
	/* Both unset?  OK. */
	if (conn->plan[IO_IN].status == IO_UNSET
	    && conn->plan[IO_OUT].status == IO_UNSET)
		return io_close(conn);

	/* We leave this unset then. */
	if (conn->plan[IO_IN].status == IO_UNSET)
		return &conn->plan[IO_IN];
	return &conn->plan[IO_OUT];
}

struct io_plan *io_set_plan(struct io_conn *conn, enum io_direction dir,
			    int (*io)(int fd, struct io_plan_arg *arg),
			    struct io_plan *(*next)(struct io_conn *, void *),
			    void *next_arg)
{
	struct io_plan *plan = &conn->plan[dir];

	plan->io = io;
	plan->next = next;
	plan->next_arg = next_arg;
	assert(next != NULL);

	return plan;
}

bool io_flush_sync(struct io_conn *conn)
{
	struct io_plan *plan = &conn->plan[IO_OUT];
	bool ok;

	/* Not writing?  Nothing to do. */
	if (plan->status != IO_POLLING)
		return true;

	/* Synchronous please. */
	io_fd_block(io_conn_fd(conn), true);

again:
	switch (plan->io(conn->fd.fd, &plan->arg)) {
	case -1:
		ok = false;
		break;
	/* Incomplete, try again. */
	case 0:
		goto again;
	case 1:
		ok = true;
		/* In case they come back. */
		set_always(conn, IO_OUT, plan->next, plan->next_arg);
		break;
	default:
		/* IO should only return -1, 0 or 1 */
		abort();
	}

	io_fd_block(io_conn_fd(conn), false);
	return ok;
}