if (plan == &io_conn_freed)
return false;
- /* It should have set a plan inside this conn (or duplex) */
- assert(plan == &conn->plan[IO_IN]
- || plan == &conn->plan[IO_OUT]
- || plan == &conn->plan[2]);
+ assert(plan == &conn->plan[plan->dir]);
assert(conn->plan[IO_IN].status != IO_UNSET
|| conn->plan[IO_OUT].status != IO_UNSET);
return true;
}
-static void set_blocking(int fd, bool block)
+bool io_fd_block(int fd, bool block)
{
int flags = fcntl(fd, F_GETFL);
+ if (flags == -1)
+ return false;
+
if (block)
flags &= ~O_NONBLOCK;
else
flags |= O_NONBLOCK;
- fcntl(fd, F_SETFL, flags);
+ return fcntl(fd, F_SETFL, flags) != -1;
}
struct io_conn *io_new_conn_(const tal_t *ctx, int fd,
conn->fd.fd = fd;
conn->finish = NULL;
conn->finish_arg = NULL;
- list_node_init(&conn->always);
if (!add_conn(conn))
return tal_free(conn);
/* Keep our I/O async. */
- set_blocking(fd, false);
+ io_fd_block(fd, false);
+
+ /* So we can get back from plan -> conn later */
+ conn->plan[IO_OUT].dir = IO_OUT;
+ conn->plan[IO_IN].dir = IO_IN;
/* We start with out doing nothing, and in doing our init. */
conn->plan[IO_OUT].status = IO_UNSET;
return conn;
}
+bool io_conn_exclusive(struct io_conn *conn, bool exclusive)
+{
+ return backend_set_exclusive(&conn->plan[IO_IN], exclusive);
+}
+
+bool io_conn_out_exclusive(struct io_conn *conn, bool exclusive)
+{
+ return backend_set_exclusive(&conn->plan[IO_OUT], exclusive);
+}
+
void io_set_finish_(struct io_conn *conn,
void (*finish)(struct io_conn *, void *),
void *arg)
{
assert(conn->plan[dir].status == IO_UNSET);
- conn->plan[dir].status = IO_POLLING;
+ conn->plan[dir].status = IO_POLLING_NOTSTARTED;
return &conn->plan[dir].arg;
}
struct io_plan *plan = &conn->plan[dir];
plan->status = IO_ALWAYS;
- backend_new_always(conn);
+ /* Only happens on OOM, and only with non-default tal_backend. */
+ if (!backend_new_always(plan))
+ return NULL;
return io_set_plan(conn, dir, NULL, next, arg);
}
static int do_read(int fd, struct io_plan_arg *arg)
{
ssize_t ret = read(fd, arg->u1.cp, arg->u2.s);
- if (ret <= 0)
+ if (ret <= 0) {
+		/* Errno isn't set if we hit EOF, so set it to a distinct value */
+ if (ret == 0)
+ errno = 0;
return -1;
+ }
arg->u1.cp += ret;
arg->u2.s -= ret;
static int do_read_partial(int fd, struct io_plan_arg *arg)
{
ssize_t ret = read(fd, arg->u1.cp, *(size_t *)arg->u2.vp);
- if (ret <= 0)
+ if (ret <= 0) {
+		/* Errno isn't set if we hit EOF, so set it to a distinct value */
+ if (ret == 0)
+ errno = 0;
return -1;
+ }
*(size_t *)arg->u2.vp = ret;
return 1;
backend_wake(wait);
}
-/* Returns false if this has been freed. */
-static bool do_plan(struct io_conn *conn, struct io_plan *plan)
+/* Returns false if this should not be touched (eg. freed). */
+static bool do_plan(struct io_conn *conn, struct io_plan *plan,
+ bool idle_on_epipe)
{
/* We shouldn't have polled for this event if this wasn't true! */
- assert(plan->status == IO_POLLING);
+ assert(plan->status == IO_POLLING_NOTSTARTED
+ || plan->status == IO_POLLING_STARTED);
switch (plan->io(conn->fd.fd, &plan->arg)) {
case -1:
+ if (errno == EPIPE && idle_on_epipe) {
+ plan->status = IO_UNSET;
+ backend_new_plan(conn);
+ return false;
+ }
io_close(conn);
return false;
case 0:
+ plan->status = IO_POLLING_STARTED;
return true;
case 1:
return next_plan(conn, plan);
void io_ready(struct io_conn *conn, int pollflags)
{
if (pollflags & POLLIN)
- if (!do_plan(conn, &conn->plan[IO_IN]))
+ if (!do_plan(conn, &conn->plan[IO_IN], false))
return;
if (pollflags & POLLOUT)
- do_plan(conn, &conn->plan[IO_OUT]);
+ /* If we're writing to a closed pipe, we need to wait for
+ * read to fail if we're duplex: we want to drain it! */
+ do_plan(conn, &conn->plan[IO_OUT],
+ conn->plan[IO_IN].status == IO_POLLING_NOTSTARTED
+ || conn->plan[IO_IN].status == IO_POLLING_STARTED);
}
-void io_do_always(struct io_conn *conn)
+void io_do_always(struct io_plan *plan)
{
- if (conn->plan[IO_IN].status == IO_ALWAYS)
- if (!next_plan(conn, &conn->plan[IO_IN]))
- return;
+ struct io_conn *conn;
+
+ assert(plan->status == IO_ALWAYS);
+ conn = container_of(plan, struct io_conn, plan[plan->dir]);
- if (conn->plan[IO_OUT].status == IO_ALWAYS)
- next_plan(conn, &conn->plan[IO_OUT]);
+ next_plan(conn, plan);
}
void io_do_wakeup(struct io_conn *conn, enum io_direction dir)
struct io_plan *io_close_taken_fd(struct io_conn *conn)
{
- set_blocking(conn->fd.fd, true);
+ io_fd_block(conn->fd.fd, true);
cleanup_conn_without_close(conn);
return io_close(conn);
assert(conn == container_of(in_plan, struct io_conn, plan[IO_IN]));
/* in_plan must be conn->plan[IO_IN], out_plan must be [IO_OUT] */
assert(out_plan == in_plan + 1);
- return out_plan + 1;
+ return in_plan;
}
struct io_plan *io_halfclose(struct io_conn *conn)
return plan;
}
+bool io_plan_in_started(const struct io_conn *conn)
+{
+ return conn->plan[IO_IN].status == IO_POLLING_STARTED;
+}
+
+bool io_plan_out_started(const struct io_conn *conn)
+{
+ return conn->plan[IO_OUT].status == IO_POLLING_STARTED;
+}
+
+/* Despite being a TCP expert, I missed the full extent of this
+ * problem. The legendary ZmnSCPxj implemented it (with the URL
+ * pointing to the explanation), and I imitate that here. */
+struct io_plan *io_sock_shutdown(struct io_conn *conn)
+{
+ if (shutdown(io_conn_fd(conn), SHUT_WR) != 0)
+ return io_close(conn);
+
+	/* And leave unset. */
+ return &conn->plan[IO_IN];
+}
+
bool io_flush_sync(struct io_conn *conn)
{
struct io_plan *plan = &conn->plan[IO_OUT];
bool ok;
/* Not writing? Nothing to do. */
- if (plan->status != IO_POLLING)
+ if (plan->status != IO_POLLING_STARTED
+ && plan->status != IO_POLLING_NOTSTARTED)
return true;
/* Synchronous please. */
- set_blocking(io_conn_fd(conn), true);
+ io_fd_block(io_conn_fd(conn), true);
again:
switch (plan->io(conn->fd.fd, &plan->arg)) {
break;
/* Incomplete, try again. */
case 0:
+ plan->status = IO_POLLING_STARTED;
goto again;
case 1:
ok = true;
abort();
}
- set_blocking(io_conn_fd(conn), false);
+ io_fd_block(io_conn_fd(conn), false);
return ok;
}