conn->fd.fd = fd;
conn->finish = NULL;
conn->finish_arg = NULL;
- list_node_init(&conn->always);
if (!add_conn(conn))
return tal_free(conn);
return conn;
}
+/* Set (or clear) exclusive mode on @conn's *input* plan via the backend.
+ * Returns whatever backend_set_exclusive() reports — presumably whether
+ * any exclusivity is now in effect; NOTE(review): confirm the return
+ * semantics against the backend implementation. */
+bool io_conn_exclusive(struct io_conn *conn, bool exclusive)
+{
+	return backend_set_exclusive(&conn->plan[IO_IN], exclusive);
+}
+
+/* Set (or clear) exclusive mode on @conn's *output* plan via the backend.
+ * Mirrors io_conn_exclusive(), but for the IO_OUT direction; return value
+ * comes straight from backend_set_exclusive(). */
+bool io_conn_out_exclusive(struct io_conn *conn, bool exclusive)
+{
+	return backend_set_exclusive(&conn->plan[IO_OUT], exclusive);
+}
+
void io_set_finish_(struct io_conn *conn,
void (*finish)(struct io_conn *, void *),
void *arg)
struct io_plan *plan = &conn->plan[dir];
plan->status = IO_ALWAYS;
- backend_new_always(conn);
+ /* Only happens on OOM, and only with non-default tal_backend. */
+ if (!backend_new_always(plan))
+ return NULL;
return io_set_plan(conn, dir, NULL, next, arg);
}
|| conn->plan[IO_IN].status == IO_POLLING_STARTED);
}
-void io_do_always(struct io_conn *conn)
+/* Run the next plan step for a single plan known to be in IO_ALWAYS state.
+ * Taking the individual plan (rather than the whole conn) lets each
+ * direction be processed independently, which removes the in/out ordering
+ * corner case the deleted version had to work around below. */
+void io_do_always(struct io_plan *plan)
{
-	/* There's a corner case where the in next_plan wakes up the
-	 * out, placing it in IO_ALWAYS and we end up processing it immediately,
-	 * only to leave it in the always list.
-	 *
-	 * Yet we can't just process one, in case they are both supposed
-	 * to be done, so grab state beforehand.
-	 */
-	bool always_out = (conn->plan[IO_OUT].status == IO_ALWAYS);
+	struct io_conn *conn;
-	if (conn->plan[IO_IN].status == IO_ALWAYS)
-		if (!next_plan(conn, &conn->plan[IO_IN]))
-			return;
+	assert(plan->status == IO_ALWAYS);
+	/* Recover the owning conn: plan is embedded as conn->plan[plan->dir]. */
+	conn = container_of(plan, struct io_conn, plan[plan->dir]);
-	if (always_out) {
-		/* You can't *unalways* a conn (except by freeing, in which
-		 * case next_plan() returned false */
-		assert(conn->plan[IO_OUT].status == IO_ALWAYS);
-		next_plan(conn, &conn->plan[IO_OUT]);
-	}
+	next_plan(conn, plan);
}
void io_do_wakeup(struct io_conn *conn, enum io_direction dir)
return conn->plan[IO_OUT].status == IO_POLLING_STARTED;
}
+/* Despite being a TCP expert, I missed the full extent of this
+ * problem. The legendary ZmnSCPxj implemented it (with the URL
+ * pointing to the explanation), and I imitate that here. */
+struct io_plan *io_sock_shutdown(struct io_conn *conn)
+{
+	/* Half-close: SHUT_WR signals EOF to the peer while we can still
+	 * read; if the shutdown itself fails, just close the conn. */
+	if (shutdown(io_conn_fd(conn), SHUT_WR) != 0)
+		return io_close(conn);
+
+	/* And leave the plan unset. NOTE(review): presumably the caller /
+	 * loop installs the next plan state — confirm against io_loop. */
+	return &conn->plan[IO_IN];
+}
+
bool io_flush_sync(struct io_conn *conn)
{
struct io_plan *plan = &conn->plan[IO_OUT];