/* Licensed under LGPLv2.1+ - see LICENSE file for details */
#include <sys/socket.h>
#include <ccan/container_of/container_of.h>

struct io_plan io_conn_freed;

struct io_listener *io_new_listener_(const tal_t *ctx, int fd,
				     struct io_plan *(*init)(struct io_conn *,
	struct io_listener *l = tal(ctx, struct io_listener);
	l->fd.listener = true;

void io_close_listener(struct io_listener *l)

static struct io_plan *io_never_called(struct io_conn *conn, void *arg)

/* Returns false if conn was freed. */
static bool next_plan(struct io_conn *conn, struct io_plan *plan)
	struct io_plan *(*next)(struct io_conn *, void *arg);

	plan->status = IO_UNSET;
	plan->next = io_never_called;

	plan = next(conn, plan->next_arg);

	if (plan == &io_conn_freed)

	assert(plan == &conn->plan[plan->dir]);
	assert(conn->plan[IO_IN].status != IO_UNSET
	       || conn->plan[IO_OUT].status != IO_UNSET);
	backend_new_plan(conn);
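
/*
 * Sketch of the chaining next_plan() drives (hypothetical callback names,
 * using the io_write()/io_close() wrappers from io.h): each "next"
 * callback returns the connection's new plan, and io_close() returns
 * &io_conn_freed so next_plan() knows the conn is gone:
 *
 *	static struct io_plan *reply_sent(struct io_conn *conn, void *unused)
 *	{
 *		return io_close(conn);
 *	}
 *
 *	static struct io_plan *send_reply(struct io_conn *conn, void *unused)
 *	{
 *		static const char reply[] = "OK\n";
 *		return io_write(conn, reply, sizeof(reply) - 1,
 *				reply_sent, NULL);
 *	}
 */
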
bool io_fd_block(int fd, bool block)
	int flags = fcntl(fd, F_GETFL);
	return fcntl(fd, F_SETFL, flags) != -1;

struct io_conn *io_new_conn_(const tal_t *ctx, int fd,
			     struct io_plan *(*init)(struct io_conn *, void *),
	struct io_conn *conn = tal(ctx, struct io_conn);
	conn->fd.listener = false;
	conn->finish_arg = NULL;
		return tal_free(conn);

	/* Keep our I/O async. */
	io_fd_block(fd, false);

	/* So we can get back from plan -> conn later */
	conn->plan[IO_OUT].dir = IO_OUT;
	conn->plan[IO_IN].dir = IO_IN;

	/* We start with "out" doing nothing, and "in" doing our init. */
	conn->plan[IO_OUT].status = IO_UNSET;
	conn->plan[IO_IN].next = init;
	conn->plan[IO_IN].next_arg = arg;
	if (!next_plan(conn, &conn->plan[IO_IN]))
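
/*
 * Sketch of the caller's side of the above (the fd and names are
 * hypothetical, using the typesafe io_new_conn() macro from io.h and
 * io_loop() from poll.c): init runs immediately, via next_plan(), and
 * returns the connection's first plan:
 *
 *	static struct io_plan *start(struct io_conn *conn, void *unused)
 *	{
 *		static char inbuf[16];
 *		return io_read(conn, inbuf, sizeof(inbuf), io_close_cb, NULL);
 *	}
 *
 *	...
 *	if (!io_new_conn(NULL, fd, start, NULL))
 *		exit(1);
 *	io_loop(NULL, NULL);
 */
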
void io_set_finish_(struct io_conn *conn,
		    void (*finish)(struct io_conn *, void *),
	conn->finish = finish;
	conn->finish_arg = arg;
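
/*
 * Sketch of a finish callback of the type registered above (the name
 * log_close is hypothetical): it runs when the connection is freed,
 * e.g. after io_close(), with errno left indicating the cause:
 *
 *	static void log_close(struct io_conn *conn, void *unused)
 *	{
 *		fprintf(stderr, "fd %i closed: %s\n",
 *			io_conn_fd(conn), strerror(errno));
 *	}
 *
 *	...
 *	io_set_finish(conn, log_close, NULL);
 */
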
struct io_plan_arg *io_plan_arg(struct io_conn *conn, enum io_direction dir)
	assert(conn->plan[dir].status == IO_UNSET);
	conn->plan[dir].status = IO_POLLING_NOTSTARTED;
	return &conn->plan[dir].arg;

static struct io_plan *set_always(struct io_conn *conn,
				  enum io_direction dir,
				  struct io_plan *(*next)(struct io_conn *,
	struct io_plan *plan = &conn->plan[dir];

	plan->status = IO_ALWAYS;
	/* Only happens on OOM, and only with non-default tal_backend. */
	if (!backend_new_always(plan))

	return io_set_plan(conn, dir, NULL, next, arg);

static struct io_plan *io_always_dir(struct io_conn *conn,
				     enum io_direction dir,
				     struct io_plan *(*next)(struct io_conn *,
	return set_always(conn, dir, next, arg);

struct io_plan *io_always_(struct io_conn *conn,
			   struct io_plan *(*next)(struct io_conn *, void *),
	return io_always_dir(conn, IO_IN, next, arg);

struct io_plan *io_out_always_(struct io_conn *conn,
			       struct io_plan *(*next)(struct io_conn *,
	return io_always_dir(conn, IO_OUT, next, arg);
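
/*
 * Sketch of what io_always()/io_out_always() are for (hypothetical
 * callback name): plan an immediate call to `next` on the next pass
 * through io_loop(), instead of calling it directly, when no actual
 * I/O is wanted first:
 *
 *	static struct io_plan *deferred(struct io_conn *conn, void *unused)
 *	{
 *		return io_close(conn);
 *	}
 *
 *	...
 *	return io_always(conn, deferred, NULL);
 */
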
static int do_write(int fd, struct io_plan_arg *arg)
	ssize_t ret = write(fd, arg->u1.cp, arg->u2.s);
	return arg->u2.s == 0;

/* Queue some data to be written. */
struct io_plan *io_write_(struct io_conn *conn, const void *data, size_t len,
			  struct io_plan *(*next)(struct io_conn *, void *),
	struct io_plan_arg *arg = io_plan_arg(conn, IO_OUT);
		return set_always(conn, IO_OUT, next, next_arg);
	arg->u1.const_vp = data;

	return io_set_plan(conn, IO_OUT, do_write, next, next_arg);
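
/*
 * Sketch of using the io_write() wrapper from io.h with a tal-allocated
 * buffer (the names and tal_strdup() from ccan/tal/str are illustrative):
 * the data must stay valid until the write completes, so it is freed in
 * the next callback rather than before it:
 *
 *	static struct io_plan *sent(struct io_conn *conn, char *msg)
 *	{
 *		tal_free(msg);
 *		return io_close(conn);
 *	}
 *
 *	static struct io_plan *send_msg(struct io_conn *conn, void *unused)
 *	{
 *		char *msg = tal_strdup(conn, "hello\n");
 *		return io_write(conn, msg, strlen(msg), sent, msg);
 *	}
 */
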
static int do_read(int fd, struct io_plan_arg *arg)
	ssize_t ret = read(fd, arg->u1.cp, arg->u2.s);
	/* Errno isn't set if we hit EOF, so set it to a distinct value */
	return arg->u2.s == 0;

/* Queue a request to read into a buffer. */
struct io_plan *io_read_(struct io_conn *conn,
			 void *data, size_t len,
			 struct io_plan *(*next)(struct io_conn *, void *),
	struct io_plan_arg *arg = io_plan_arg(conn, IO_IN);
		return set_always(conn, IO_IN, next, next_arg);

	return io_set_plan(conn, IO_IN, do_read, next, next_arg);
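
/*
 * Sketch of a fixed-length read via the io_read() wrapper from io.h
 * (hypothetical names): the plan only completes once the whole buffer
 * has been filled (do_read() above returns 1 when the remaining count
 * reaches zero), then the next callback runs:
 *
 *	static struct hdr {
 *		uint32_t len;
 *	} hdr;
 *
 *	static struct io_plan *hdr_in(struct io_conn *conn, void *unused)
 *	{
 *		return io_close(conn);
 *	}
 *
 *	static struct io_plan *read_hdr(struct io_conn *conn, void *unused)
 *	{
 *		return io_read(conn, &hdr, sizeof(hdr), hdr_in, NULL);
 *	}
 */
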
static int do_read_partial(int fd, struct io_plan_arg *arg)
	ssize_t ret = read(fd, arg->u1.cp, *(size_t *)arg->u2.vp);
	/* Errno isn't set if we hit EOF, so set it to a distinct value */
	*(size_t *)arg->u2.vp = ret;

/* Queue a partial request to read into a buffer. */
struct io_plan *io_read_partial_(struct io_conn *conn,
				 void *data, size_t maxlen, size_t *len,
				 struct io_plan *(*next)(struct io_conn *,
	struct io_plan_arg *arg = io_plan_arg(conn, IO_IN);
		return set_always(conn, IO_IN, next, next_arg);

	/* We store the max len in here temporarily. */
	return io_set_plan(conn, IO_IN, do_read_partial, next, next_arg);
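
/*
 * Sketch of the partial-read variant (hypothetical names, io_read_partial()
 * macro from io.h): it completes after a single successful read() of up to
 * maxlen bytes, and *len is overwritten with the number actually read, as
 * do_read_partial() above shows:
 *
 *	static char buf[4096];
 *	static size_t buf_len;
 *
 *	static struct io_plan *got_some(struct io_conn *conn, void *unused)
 *	{
 *		printf("read %zu bytes\n", buf_len);
 *		return io_close(conn);
 *	}
 *
 *	static struct io_plan *read_some(struct io_conn *conn, void *unused)
 *	{
 *		return io_read_partial(conn, buf, sizeof(buf), &buf_len,
 *				       got_some, NULL);
 *	}
 */
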
static int do_write_partial(int fd, struct io_plan_arg *arg)
	ssize_t ret = write(fd, arg->u1.cp, *(size_t *)arg->u2.vp);
	*(size_t *)arg->u2.vp = ret;

/* Queue a partial write request. */
struct io_plan *io_write_partial_(struct io_conn *conn,
				  const void *data, size_t maxlen, size_t *len,
				  struct io_plan *(*next)(struct io_conn *,
	struct io_plan_arg *arg = io_plan_arg(conn, IO_OUT);
		return set_always(conn, IO_OUT, next, next_arg);
	arg->u1.const_vp = data;
	/* We store the max len in here temporarily. */
	return io_set_plan(conn, IO_OUT, do_write_partial, next, next_arg);

static int do_connect(int fd, struct io_plan_arg *arg)
	socklen_t len = sizeof(err);

	/* Has async connect finished? */
	ret = getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
	} else if (err == EINPROGRESS)

struct io_plan *io_connect_(struct io_conn *conn, const struct addrinfo *addr,
			    struct io_plan *(*next)(struct io_conn *, void *),
	int fd = io_conn_fd(conn);

	/* We don't actually need the arg, but we need the plan to be polling. */
	io_plan_arg(conn, IO_OUT);

	/* Note that io_new_conn() will make fd O_NONBLOCK */

	/* Immediate connect can happen. */
	if (connect(fd, addr->ai_addr, addr->ai_addrlen) == 0)
		return set_always(conn, IO_OUT, next, next_arg);

	if (errno != EINPROGRESS)
		return io_close(conn);

	return io_set_plan(conn, IO_OUT, do_connect, next, next_arg);
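
/*
 * Sketch of a non-blocking connect using the io_connect() macro from io.h
 * together with io_new_conn(); the lookup, fd handling and callback names
 * are hypothetical and error checks are omitted.  As noted above, the fd
 * is already O_NONBLOCK, so connect() usually returns EINPROGRESS and
 * do_connect() finishes the job once the socket becomes writable:
 *
 *	static struct io_plan *connected(struct io_conn *conn, void *unused)
 *	{
 *		return io_close(conn);
 *	}
 *
 *	static struct io_plan *init_connect(struct io_conn *conn,
 *					    struct addrinfo *addr)
 *	{
 *		return io_connect(conn, addr, connected, NULL);
 *	}
 *
 *	...
 *	struct addrinfo *addr;
 *	int fd;
 *
 *	getaddrinfo("example.com", "80", NULL, &addr);
 *	fd = socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol);
 *	io_new_conn(NULL, fd, init_connect, addr);
 *	io_loop(NULL, NULL);
 */
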
static struct io_plan *io_wait_dir(struct io_conn *conn,
				   enum io_direction dir,
				   struct io_plan *(*next)(struct io_conn *,
	struct io_plan_arg *arg = io_plan_arg(conn, dir);
	arg->u1.const_vp = wait;

	conn->plan[dir].status = IO_WAITING;

	return io_set_plan(conn, dir, NULL, next, next_arg);

struct io_plan *io_wait_(struct io_conn *conn,
			 struct io_plan *(*next)(struct io_conn *, void *),
	return io_wait_dir(conn, wait, IO_IN, next, next_arg);

struct io_plan *io_out_wait_(struct io_conn *conn,
			     struct io_plan *(*next)(struct io_conn *, void *),
	return io_wait_dir(conn, wait, IO_OUT, next, next_arg);
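
/*
 * Sketch of the wait/wake pairing (hypothetical names; `key` is simply any
 * pointer both sides agree on): a plan parked with io_wait() stays in
 * IO_WAITING until io_wake() is called on the same pointer, at which point
 * io_do_wakeup() below converts it into an "always" plan and `next` runs
 * on the following loop pass:
 *
 *	static struct io_plan *woken(struct io_conn *conn, void *unused)
 *	{
 *		static const char msg[] = "woken\n";
 *		return io_write(conn, msg, sizeof(msg) - 1, io_close_cb, NULL);
 *	}
 *
 *	static struct io_plan *sleep_on(struct io_conn *conn, void *key)
 *	{
 *		return io_wait(conn, key, woken, NULL);
 *	}
 *
 * and, from whatever code produces the event:
 *
 *	io_wake(key);
 */
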
void io_wake(const void *wait)

/* Returns false if this should not be touched (e.g. freed). */
static bool do_plan(struct io_conn *conn, struct io_plan *plan,
	/* We shouldn't have polled for this event if this wasn't true! */
	assert(plan->status == IO_POLLING_NOTSTARTED
	       || plan->status == IO_POLLING_STARTED);

	switch (plan->io(conn->fd.fd, &plan->arg)) {
		if (errno == EPIPE && idle_on_epipe) {
			plan->status = IO_UNSET;
			backend_new_plan(conn);
		plan->status = IO_POLLING_STARTED;
		return next_plan(conn, plan);

		/* IO should only return -1, 0 or 1 */

void io_ready(struct io_conn *conn, int pollflags)
	if (pollflags & POLLIN)
		if (!do_plan(conn, &conn->plan[IO_IN], false))

	if (pollflags & POLLOUT)
		/* If we're writing to a closed pipe, we need to wait for
		 * read to fail if we're duplex: we want to drain it! */
		do_plan(conn, &conn->plan[IO_OUT],
			conn->plan[IO_IN].status == IO_POLLING_NOTSTARTED
			|| conn->plan[IO_IN].status == IO_POLLING_STARTED);

void io_do_always(struct io_plan *plan)
	struct io_conn *conn;

	assert(plan->status == IO_ALWAYS);
	conn = container_of(plan, struct io_conn, plan[plan->dir]);
	next_plan(conn, plan);

void io_do_wakeup(struct io_conn *conn, enum io_direction dir)
	struct io_plan *plan = &conn->plan[dir];

	assert(plan->status == IO_WAITING);
	set_always(conn, dir, plan->next, plan->next_arg);

/* Close the connection, we're done. */
struct io_plan *io_close(struct io_conn *conn)
	return &io_conn_freed;

struct io_plan *io_close_cb(struct io_conn *conn, void *next_arg)
	return io_close(conn);

struct io_plan *io_close_taken_fd(struct io_conn *conn)
	io_fd_block(conn->fd.fd, true);
	cleanup_conn_without_close(conn);
	return io_close(conn);

/* Exit the loop, returning this (non-NULL) arg. */
void io_break(const void *ret)
	io_loop_return = (void *)ret;

struct io_plan *io_never(struct io_conn *conn, void *unused)
	return io_always(conn, io_never_called, NULL);

int io_conn_fd(const struct io_conn *conn)

struct io_plan *io_duplex(struct io_conn *conn,
			  struct io_plan *in_plan, struct io_plan *out_plan)
	assert(conn == container_of(in_plan, struct io_conn, plan[IO_IN]));
	/* in_plan must be conn->plan[IO_IN], out_plan must be [IO_OUT] */
	assert(out_plan == in_plan + 1);
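
/*
 * Sketch of io_duplex() use (hypothetical names): it lets one connection
 * run an input plan and an output plan at the same time, so both arguments
 * must be this connection's own IO_IN and IO_OUT plans, which is what the
 * asserts above check.  Each side half-closes when it finishes, and
 * io_halfclose() below only really closes once both sides are done:
 *
 *	static char inbuf[64];
 *	static const char greeting[] = "hello\n";
 *
 *	static struct io_plan *side_done(struct io_conn *conn, void *unused)
 *	{
 *		return io_halfclose(conn);
 *	}
 *
 *	static struct io_plan *chat_init(struct io_conn *conn, void *unused)
 *	{
 *		return io_duplex(conn,
 *				 io_read(conn, inbuf, sizeof(inbuf),
 *					 side_done, NULL),
 *				 io_write(conn, greeting, sizeof(greeting) - 1,
 *					  side_done, NULL));
 *	}
 */
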
struct io_plan *io_halfclose(struct io_conn *conn)
	/* Both unset? OK. */
	if (conn->plan[IO_IN].status == IO_UNSET
	    && conn->plan[IO_OUT].status == IO_UNSET)
		return io_close(conn);

	/* We leave this unset then. */
	if (conn->plan[IO_IN].status == IO_UNSET)
		return &conn->plan[IO_IN];

	return &conn->plan[IO_OUT];

struct io_plan *io_set_plan(struct io_conn *conn, enum io_direction dir,
			    int (*io)(int fd, struct io_plan_arg *arg),
			    struct io_plan *(*next)(struct io_conn *, void *),
	struct io_plan *plan = &conn->plan[dir];
	plan->next_arg = next_arg;
	assert(next != NULL);

bool io_plan_in_started(const struct io_conn *conn)
	return conn->plan[IO_IN].status == IO_POLLING_STARTED;

bool io_plan_out_started(const struct io_conn *conn)
	return conn->plan[IO_OUT].status == IO_POLLING_STARTED;

bool io_flush_sync(struct io_conn *conn)
	struct io_plan *plan = &conn->plan[IO_OUT];

	/* Not writing? Nothing to do. */
	if (plan->status != IO_POLLING_STARTED
	    && plan->status != IO_POLLING_NOTSTARTED)

	/* Synchronous please. */
	io_fd_block(io_conn_fd(conn), true);

	switch (plan->io(conn->fd.fd, &plan->arg)) {
	/* Incomplete, try again. */
		plan->status = IO_POLLING_STARTED;
		/* In case they come back. */
		set_always(conn, IO_OUT, plan->next, plan->next_arg);
		/* IO should only return -1, 0 or 1 */

	io_fd_block(io_conn_fd(conn), false);
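
/*
 * Sketch of when io_flush_sync() is useful (hypothetical helper name):
 * before exiting or exec'ing, force any pending output plan to finish by
 * briefly making the fd blocking, exactly as the code above does, and
 * report failure if the write could not complete:
 *
 *	static void flush_and_exit(struct io_conn *conn)
 *	{
 *		if (!io_flush_sync(conn))
 *			fprintf(stderr, "output not flushed\n");
 *		exit(0);
 *	}
 */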