/* Licensed under LGPLv2.1+ - see LICENSE file for details */
#include "io.h"
#include "backend.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <poll.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <assert.h>
#include <unistd.h>
#include <fcntl.h>
#include <ccan/container_of/container_of.h>

void *io_loop_return;

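/* Sentinel plan: io_close() returns its address, so next_plan() can tell
 * that the conn was freed and must not be touched again. */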
struct io_plan io_conn_freed;

struct io_listener *io_new_listener_(const tal_t *ctx, int fd,
				     struct io_plan *(*init)(struct io_conn *,
							     void *),
				     void *arg)
{
	struct io_listener *l = tal(ctx, struct io_listener);
	if (!l)
		return NULL;

	l->fd.listener = true;
	l->fd.fd = fd;
	l->init = init;
	l->arg = arg;
	l->ctx = ctx;
	if (!add_listener(l))
		return tal_free(l);
	return l;
}

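/* A listener is a tal object: closing it is just freeing it. */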
void io_close_listener(struct io_listener *l)
{
	tal_free(l);
}

static struct io_plan *io_never_called(struct io_conn *conn, void *arg)
{
	abort();
}

/* Returns false if conn was freed. */
static bool next_plan(struct io_conn *conn, struct io_plan *plan)
{
	struct io_plan *(*next)(struct io_conn *, void *arg);

	next = plan->next;

	plan->status = IO_UNSET;
	plan->io = NULL;
	plan->next = io_never_called;

	plan = next(conn, plan->next_arg);

	if (plan == &io_conn_freed)
		return false;

	assert(plan == &conn->plan[plan->dir]);
	assert(conn->plan[IO_IN].status != IO_UNSET
	       || conn->plan[IO_OUT].status != IO_UNSET);

	backend_new_plan(conn);
	return true;
}

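/* Toggle O_NONBLOCK on an fd; returns false if fcntl() fails. */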
bool io_fd_block(int fd, bool block)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags == -1)
		return false;

	if (block)
		flags &= ~O_NONBLOCK;
	else
		flags |= O_NONBLOCK;

	return fcntl(fd, F_SETFL, flags) != -1;
}

struct io_conn *io_new_conn_(const tal_t *ctx, int fd,
			     struct io_plan *(*init)(struct io_conn *, void *),
			     void *arg)
{
	struct io_conn *conn = tal(ctx, struct io_conn);

	if (!conn)
		return NULL;

	conn->fd.listener = false;
	conn->fd.fd = fd;
	conn->finish = NULL;
	conn->finish_arg = NULL;

	if (!add_conn(conn))
		return tal_free(conn);

	/* Keep our I/O async. */
	io_fd_block(fd, false);

	/* So we can get back from plan -> conn later */
	conn->plan[IO_OUT].dir = IO_OUT;
	conn->plan[IO_IN].dir = IO_IN;

	/* We start with the out plan doing nothing, and the in plan
	 * doing our init. */
	conn->plan[IO_OUT].status = IO_UNSET;

	conn->plan[IO_IN].next = init;
	conn->plan[IO_IN].next_arg = arg;
	if (!next_plan(conn, &conn->plan[IO_IN]))
		return NULL;

	return conn;
}

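/* Usage sketch (illustrative only: `start`, `read_done` and `buf` are
 * caller-supplied, not part of this file):
 *
 *	static struct io_plan *start(struct io_conn *conn, void *buf)
 *	{
 *		return io_read(conn, buf, 16, read_done, buf);
 *	}
 *	...
 *	io_new_conn(NULL, fd, start, buf);
 */
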
bool io_conn_exclusive(struct io_conn *conn, bool exclusive)
{
	return backend_set_exclusive(&conn->plan[IO_IN], exclusive);
}

bool io_conn_out_exclusive(struct io_conn *conn, bool exclusive)
{
	return backend_set_exclusive(&conn->plan[IO_OUT], exclusive);
}

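/* Register an optional callback to run when the conn is closed/freed. */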
void io_set_finish_(struct io_conn *conn,
		    void (*finish)(struct io_conn *, void *),
		    void *arg)
{
	conn->finish = finish;
	conn->finish_arg = arg;
}

struct io_plan_arg *io_plan_arg(struct io_conn *conn, enum io_direction dir)
{
	assert(conn->plan[dir].status == IO_UNSET);

	conn->plan[dir].status = IO_POLLING_NOTSTARTED;
	return &conn->plan[dir].arg;
}

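/* Queue next() to run on the next loop iteration, without polling the fd. */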
static struct io_plan *set_always(struct io_conn *conn,
				  enum io_direction dir,
				  struct io_plan *(*next)(struct io_conn *,
							  void *),
				  void *arg)
{
	struct io_plan *plan = &conn->plan[dir];

	plan->status = IO_ALWAYS;
	/* Only happens on OOM, and only with non-default tal backend. */
	if (!backend_new_always(plan))
		return io_close(conn);
	return io_set_plan(conn, dir, NULL, next, arg);
}

static struct io_plan *io_always_dir(struct io_conn *conn,
				     enum io_direction dir,
				     struct io_plan *(*next)(struct io_conn *,
							     void *),
				     void *arg)
{
	return set_always(conn, dir, next, arg);
}

struct io_plan *io_always_(struct io_conn *conn,
			   struct io_plan *(*next)(struct io_conn *, void *),
			   void *arg)
{
	return io_always_dir(conn, IO_IN, next, arg);
}

struct io_plan *io_out_always_(struct io_conn *conn,
			       struct io_plan *(*next)(struct io_conn *,
						       void *),
			       void *arg)
{
	return io_always_dir(conn, IO_OUT, next, arg);
}

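/* All plan io callbacks share one convention: return -1 on error (which
 * closes the conn), 0 to keep polling, 1 when the plan is complete. */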
static int do_write(int fd, struct io_plan_arg *arg)
{
	ssize_t ret = write(fd, arg->u1.cp, arg->u2.s);
	if (ret < 0)
		return -1;

	arg->u1.cp += ret;
	arg->u2.s -= ret;
	return arg->u2.s == 0;
}

/* Queue some data to be written. */
struct io_plan *io_write_(struct io_conn *conn, const void *data, size_t len,
			  struct io_plan *(*next)(struct io_conn *, void *),
			  void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, IO_OUT);

	if (len == 0)
		return set_always(conn, IO_OUT, next, next_arg);

	arg->u1.const_vp = data;
	arg->u2.s = len;

	return io_set_plan(conn, IO_OUT, do_write, next, next_arg);
}

static int do_read(int fd, struct io_plan_arg *arg)
{
	ssize_t ret = read(fd, arg->u1.cp, arg->u2.s);
	if (ret <= 0) {
		/* Errno isn't set if we hit EOF, so set it to a distinct value */
		if (ret == 0)
			errno = 0;
		return -1;
	}

	arg->u1.cp += ret;
	arg->u2.s -= ret;
	return arg->u2.s == 0;
}

/* Queue a request to read into a buffer. */
struct io_plan *io_read_(struct io_conn *conn,
			 void *data, size_t len,
			 struct io_plan *(*next)(struct io_conn *, void *),
			 void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, IO_IN);

	if (len == 0)
		return set_always(conn, IO_IN, next, next_arg);

	arg->u1.cp = data;
	arg->u2.s = len;

	return io_set_plan(conn, IO_IN, do_read, next, next_arg);
}

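/* Usage sketch (illustrative; `state` is a hypothetical caller struct):
 *	return io_read(conn, state->buf, sizeof(state->buf),
 *		       read_done, state);
 */
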
static int do_read_partial(int fd, struct io_plan_arg *arg)
{
	ssize_t ret = read(fd, arg->u1.cp, *(size_t *)arg->u2.vp);
	if (ret <= 0) {
		/* Errno isn't set if we hit EOF, so set it to a distinct value */
		if (ret == 0)
			errno = 0;
		return -1;
	}

	*(size_t *)arg->u2.vp = ret;
	return 1;
}

/* Queue a partial request to read into a buffer. */
struct io_plan *io_read_partial_(struct io_conn *conn,
				 void *data, size_t maxlen, size_t *len,
				 struct io_plan *(*next)(struct io_conn *,
							 void *),
				 void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, IO_IN);

	if (maxlen == 0)
		return set_always(conn, IO_IN, next, next_arg);

	arg->u1.cp = data;
	/* We store the max len in here temporarily. */
	*len = maxlen;
	arg->u2.vp = len;

	return io_set_plan(conn, IO_IN, do_read_partial, next, next_arg);
}

static int do_write_partial(int fd, struct io_plan_arg *arg)
{
	ssize_t ret = write(fd, arg->u1.cp, *(size_t *)arg->u2.vp);
	if (ret < 0)
		return -1;

	*(size_t *)arg->u2.vp = ret;
	return 1;
}

/* Queue a partial write request. */
struct io_plan *io_write_partial_(struct io_conn *conn,
				  const void *data, size_t maxlen, size_t *len,
				  struct io_plan *(*next)(struct io_conn *,
							  void *),
				  void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, IO_OUT);

	if (maxlen == 0)
		return set_always(conn, IO_OUT, next, next_arg);

	arg->u1.const_vp = data;
	/* We store the max len in here temporarily. */
	*len = maxlen;
	arg->u2.vp = len;

	return io_set_plan(conn, IO_OUT, do_write_partial, next, next_arg);
}

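/* Async connect: once the fd polls writable, SO_ERROR tells us whether the
 * underlying connect(2) succeeded, failed, or is still in progress. */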
static int do_connect(int fd, struct io_plan_arg *arg)
{
	int err, ret;
	socklen_t len = sizeof(err);

	/* Has async connect finished? */
	ret = getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
	if (ret < 0)
		return -1;

	if (err == 0) {
		return 1;
	} else if (err == EINPROGRESS)
		return 0;

	errno = err;
	return -1;
}

struct io_plan *io_connect_(struct io_conn *conn, const struct addrinfo *addr,
			    struct io_plan *(*next)(struct io_conn *, void *),
			    void *next_arg)
{
	int fd = io_conn_fd(conn);

	/* We don't actually need the arg, but we need it polling. */
	io_plan_arg(conn, IO_OUT);

	/* Note that io_new_conn() will make fd O_NONBLOCK */

	/* Immediate connect can happen. */
	if (connect(fd, addr->ai_addr, addr->ai_addrlen) == 0)
		return set_always(conn, IO_OUT, next, next_arg);

	if (errno != EINPROGRESS)
		return io_close(conn);

	return io_set_plan(conn, IO_OUT, do_connect, next, next_arg);
}

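/* Usage sketch (illustrative; `ai` would come from getaddrinfo() and
 * `connected_cb` is caller-supplied):
 *	return io_connect(conn, ai, connected_cb, state);
 */
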
static struct io_plan *io_wait_dir(struct io_conn *conn,
				   const void *wait,
				   enum io_direction dir,
				   struct io_plan *(*next)(struct io_conn *,
							   void *),
				   void *next_arg)
{
	struct io_plan_arg *arg = io_plan_arg(conn, dir);
	arg->u1.const_vp = wait;

	conn->plan[dir].status = IO_WAITING;

	return io_set_plan(conn, dir, NULL, next, next_arg);
}

struct io_plan *io_wait_(struct io_conn *conn,
			 const void *wait,
			 struct io_plan *(*next)(struct io_conn *, void *),
			 void *next_arg)
{
	return io_wait_dir(conn, wait, IO_IN, next, next_arg);
}

struct io_plan *io_out_wait_(struct io_conn *conn,
			     const void *wait,
			     struct io_plan *(*next)(struct io_conn *, void *),
			     void *next_arg)
{
	return io_wait_dir(conn, wait, IO_OUT, next, next_arg);
}

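/* Wake every conn currently sleeping in io_wait()/io_out_wait() on this
 * pointer. */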
void io_wake(const void *wait)
{
	backend_wake(wait);
}

/* Returns false if this should not be touched (eg. freed). */
static bool do_plan(struct io_conn *conn, struct io_plan *plan,
		    bool idle_on_epipe)
{
	/* We shouldn't have polled for this event if this wasn't true! */
	assert(plan->status == IO_POLLING_NOTSTARTED
	       || plan->status == IO_POLLING_STARTED);

	switch (plan->io(conn->fd.fd, &plan->arg)) {
	case -1:
		if (errno == EPIPE && idle_on_epipe) {
			plan->status = IO_UNSET;
			backend_new_plan(conn);
			return false;
		}
		io_close(conn);
		return false;
	case 0:
		plan->status = IO_POLLING_STARTED;
		return true;
	case 1:
		return next_plan(conn, plan);
	default:
		/* IO should only return -1, 0 or 1 */
		abort();
	}
}

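/* Entry point from the poll loop: run whichever of this conn's plans the
 * reported pollflags make runnable. */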
void io_ready(struct io_conn *conn, int pollflags)
{
	if (pollflags & POLLIN)
		if (!do_plan(conn, &conn->plan[IO_IN], false))
			return;

	if (pollflags & POLLOUT)
		/* If we're writing to a closed pipe, we need to wait for
		 * read to fail if we're duplex: we want to drain it! */
		do_plan(conn, &conn->plan[IO_OUT],
			conn->plan[IO_IN].status == IO_POLLING_NOTSTARTED
			|| conn->plan[IO_IN].status == IO_POLLING_STARTED);
}

void io_do_always(struct io_plan *plan)
{
	struct io_conn *conn;

	assert(plan->status == IO_ALWAYS);
	conn = container_of(plan, struct io_conn, plan[plan->dir]);

	next_plan(conn, plan);
}

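/* Called by the backend once io_wake() has matched this plan's wait
 * pointer: the WAITING plan becomes an ALWAYS plan, so next() runs soon. */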
void io_do_wakeup(struct io_conn *conn, enum io_direction dir)
{
	struct io_plan *plan = &conn->plan[dir];

	assert(plan->status == IO_WAITING);

	set_always(conn, dir, plan->next, plan->next_arg);
}

/* Close the connection, we're done. */
struct io_plan *io_close(struct io_conn *conn)
{
	tal_free(conn);
	return &io_conn_freed;
}

struct io_plan *io_close_cb(struct io_conn *conn, void *next_arg)
{
	return io_close(conn);
}

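/* Close the conn but let the caller keep the fd: restore blocking mode and
 * detach from the backend instead of closing the fd itself. */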
struct io_plan *io_close_taken_fd(struct io_conn *conn)
{
	io_fd_block(conn->fd.fd, true);

	cleanup_conn_without_close(conn);
	return io_close(conn);
}

/* Exit the loop, returning this (non-NULL) arg. */
void io_break(const void *ret)
{
	assert(ret);
	io_loop_return = (void *)ret;
}

struct io_plan *io_never(struct io_conn *conn, void *unused)
{
	return io_always(conn, io_never_called, NULL);
}

int io_conn_fd(const struct io_conn *conn)
{
	return conn->fd.fd;
}

struct io_plan *io_duplex(struct io_conn *conn,
			  struct io_plan *in_plan, struct io_plan *out_plan)
{
	assert(conn == container_of(in_plan, struct io_conn, plan[IO_IN]));
	/* in_plan must be conn->plan[IO_IN], out_plan must be [IO_OUT] */
	assert(out_plan == in_plan + 1);
	return out_plan + 1;
}

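/* Shut down one direction of the conn; only actually close once both
 * directions are unset. */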
struct io_plan *io_halfclose(struct io_conn *conn)
{
	/* Both unset?  OK. */
	if (conn->plan[IO_IN].status == IO_UNSET
	    && conn->plan[IO_OUT].status == IO_UNSET)
		return io_close(conn);

	/* We leave this unset then. */
	if (conn->plan[IO_IN].status == IO_UNSET)
		return &conn->plan[IO_IN];
	return &conn->plan[IO_OUT];
}

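/* Common tail for all plan constructors: record the io callback and the
 * continuation, and hand the plan back. */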
struct io_plan *io_set_plan(struct io_conn *conn, enum io_direction dir,
			    int (*io)(int fd, struct io_plan_arg *arg),
			    struct io_plan *(*next)(struct io_conn *, void *),
			    void *next_arg)
{
	struct io_plan *plan = &conn->plan[dir];

	plan->io = io;
	plan->next = next;
	plan->next_arg = next_arg;
	assert(next != NULL);

	return plan;
}

bool io_plan_in_started(const struct io_conn *conn)
{
	return conn->plan[IO_IN].status == IO_POLLING_STARTED;
}

bool io_plan_out_started(const struct io_conn *conn)
{
	return conn->plan[IO_OUT].status == IO_POLLING_STARTED;
}

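/* Synchronously complete any outstanding write, temporarily switching the
 * fd to blocking mode. */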
bool io_flush_sync(struct io_conn *conn)
{
	struct io_plan *plan = &conn->plan[IO_OUT];
	bool ok;

	/* Not writing?  Nothing to do. */
	if (plan->status != IO_POLLING_STARTED
	    && plan->status != IO_POLLING_NOTSTARTED)
		return true;

	/* Synchronous please. */
	io_fd_block(io_conn_fd(conn), true);

again:
	switch (plan->io(conn->fd.fd, &plan->arg)) {
	case -1:
		ok = false;
		break;
	/* Incomplete, try again. */
	case 0:
		plan->status = IO_POLLING_STARTED;
		goto again;
	case 1:
		ok = true;
		/* In case they come back. */
		set_always(conn, IO_OUT, plan->next, plan->next_arg);
		break;
	default:
		/* IO should only return -1, 0 or 1 */
		abort();
	}

	io_fd_block(io_conn_fd(conn), false);
	return ok;
}