if (plan == &io_conn_freed)
return false;
- /* It should have set a plan inside this conn (or duplex) */
- assert(plan == &conn->plan[IO_IN]
- || plan == &conn->plan[IO_OUT]
- || plan == &conn->plan[2]);
+ assert(plan == &conn->plan[plan->dir]);
assert(conn->plan[IO_IN].status != IO_UNSET
|| conn->plan[IO_OUT].status != IO_UNSET);
conn->fd.fd = fd;
conn->finish = NULL;
conn->finish_arg = NULL;
- list_node_init(&conn->always);
if (!add_conn(conn))
return tal_free(conn);
/* Keep our I/O async. */
io_fd_block(fd, false);
+ /* So we can get back from plan -> conn later */
+ conn->plan[IO_OUT].dir = IO_OUT;
+ conn->plan[IO_IN].dir = IO_IN;
+
/* We start with out doing nothing, and in doing our init. */
conn->plan[IO_OUT].status = IO_UNSET;
return conn;
}
+/* Make the backend poll only this conn's IN plan (or lift that
+ * restriction when exclusive is false).  NOTE(review): exact semantics
+ * and the meaning of the bool return live in backend_set_exclusive()
+ * -- confirm there. */
+bool io_conn_exclusive(struct io_conn *conn, bool exclusive)
+{
+ return backend_set_exclusive(&conn->plan[IO_IN], exclusive);
+}
+
+/* As io_conn_exclusive(), but for the OUT (write) plan of this conn.
+ * NOTE(review): return-value meaning defined by backend_set_exclusive(). */
+bool io_conn_out_exclusive(struct io_conn *conn, bool exclusive)
+{
+ return backend_set_exclusive(&conn->plan[IO_OUT], exclusive);
+}
+
void io_set_finish_(struct io_conn *conn,
void (*finish)(struct io_conn *, void *),
void *arg)
{
assert(conn->plan[dir].status == IO_UNSET);
- conn->plan[dir].status = IO_POLLING;
+ conn->plan[dir].status = IO_POLLING_NOTSTARTED;
return &conn->plan[dir].arg;
}
struct io_plan *plan = &conn->plan[dir];
plan->status = IO_ALWAYS;
- backend_new_always(conn);
+ /* Only happens on OOM, and only with non-default tal_backend. */
+ if (!backend_new_always(plan))
+ return NULL;
return io_set_plan(conn, dir, NULL, next, arg);
}
static int do_read(int fd, struct io_plan_arg *arg)
{
ssize_t ret = read(fd, arg->u1.cp, arg->u2.s);
- if (ret <= 0)
+ if (ret <= 0) {
+ /* Errno isn't set if we hit EOF, so set it to distinct value */
+ if (ret == 0)
+ errno = 0;
return -1;
+ }
arg->u1.cp += ret;
arg->u2.s -= ret;
static int do_read_partial(int fd, struct io_plan_arg *arg)
{
ssize_t ret = read(fd, arg->u1.cp, *(size_t *)arg->u2.vp);
- if (ret <= 0)
+ if (ret <= 0) {
+ /* Errno isn't set if we hit EOF, so set it to distinct value */
+ if (ret == 0)
+ errno = 0;
return -1;
+ }
*(size_t *)arg->u2.vp = ret;
return 1;
bool idle_on_epipe)
{
/* We shouldn't have polled for this event if this wasn't true! */
- assert(plan->status == IO_POLLING);
+ assert(plan->status == IO_POLLING_NOTSTARTED
+ || plan->status == IO_POLLING_STARTED);
switch (plan->io(conn->fd.fd, &plan->arg)) {
case -1:
io_close(conn);
return false;
case 0:
+ plan->status = IO_POLLING_STARTED;
return true;
case 1:
return next_plan(conn, plan);
/* If we're writing to a closed pipe, we need to wait for
* read to fail if we're duplex: we want to drain it! */
do_plan(conn, &conn->plan[IO_OUT],
- conn->plan[IO_IN].status == IO_POLLING);
+ conn->plan[IO_IN].status == IO_POLLING_NOTSTARTED
+ || conn->plan[IO_IN].status == IO_POLLING_STARTED);
}
-void io_do_always(struct io_conn *conn)
+/* Backend entry point: run the next step of a plan whose status is
+ * IO_ALWAYS.  Now takes the individual plan rather than the conn, so
+ * each ready plan is processed exactly once (the old per-conn version
+ * needed the corner-case dance deleted below). */
+void io_do_always(struct io_plan *plan)
{
- /* There's a corner case where the in next_plan wakes up the
- * out, placing it in IO_ALWAYS and we end up processing it immediately,
- * only to leave it in the always list.
- *
- * Yet we can't just process one, in case they are both supposed
- * to be done, so grab state beforehand.
- */
- bool always_out = (conn->plan[IO_OUT].status == IO_ALWAYS);
+ struct io_conn *conn;
- if (conn->plan[IO_IN].status == IO_ALWAYS)
- if (!next_plan(conn, &conn->plan[IO_IN]))
- return;
+ assert(plan->status == IO_ALWAYS);
+ /* plan->dir was stashed at conn init precisely so a bare plan
+ * pointer can be mapped back to its owning conn here. */
+ conn = container_of(plan, struct io_conn, plan[plan->dir]);
- if (always_out) {
- /* You can't *unalways* a conn (except by freeing, in which
- * case next_plan() returned false */
- assert(conn->plan[IO_OUT].status == IO_ALWAYS);
- next_plan(conn, &conn->plan[IO_OUT]);
- }
+ next_plan(conn, plan);
}
void io_do_wakeup(struct io_conn *conn, enum io_direction dir)
assert(conn == container_of(in_plan, struct io_conn, plan[IO_IN]));
/* in_plan must be conn->plan[IO_IN], out_plan must be [IO_OUT] */
assert(out_plan == in_plan + 1);
- return out_plan + 1;
+ return in_plan;
}
struct io_plan *io_halfclose(struct io_conn *conn)
return plan;
}
+/* Has the IN side performed at least one partial read?
+ * (IO_POLLING_STARTED, as opposed to merely being set up to poll.) */
+bool io_plan_in_started(const struct io_conn *conn)
+{
+ return conn->plan[IO_IN].status == IO_POLLING_STARTED;
+}
+
+/* Has the OUT side performed at least one partial write?
+ * (IO_POLLING_STARTED, as opposed to merely being set up to poll.) */
+bool io_plan_out_started(const struct io_conn *conn)
+{
+ return conn->plan[IO_OUT].status == IO_POLLING_STARTED;
+}
+
+/* Despite being a TCP expert, I missed the full extent of this
+ * problem. The legendary ZmnSCPxj implemented it (with the URL
+ * pointing to the explanation), and I imitate that here. */
+struct io_plan *io_sock_shutdown(struct io_conn *conn)
+{
+ /* Half-close: stop sending, but keep the read side open so we
+ * can drain anything the peer still has in flight. */
+ if (shutdown(io_conn_fd(conn), SHUT_WR) != 0)
+ return io_close(conn);
+
+ /* And leave the IN plan unset.  NOTE(review): presumably the
+ * caller chains a read/close plan next -- confirm against users. */
+ return &conn->plan[IO_IN];
+}
+
bool io_flush_sync(struct io_conn *conn)
{
struct io_plan *plan = &conn->plan[IO_OUT];
bool ok;
/* Not writing? Nothing to do. */
- if (plan->status != IO_POLLING)
+ if (plan->status != IO_POLLING_STARTED
+ && plan->status != IO_POLLING_NOTSTARTED)
return true;
/* Synchronous please. */
break;
/* Incomplete, try again. */
case 0:
+ plan->status = IO_POLLING_STARTED;
goto again;
case 1:
ok = true;