void *io_loop_return;
+struct io_alloc io_alloc = {
+ malloc, realloc, free
+};
+
#ifdef DEBUG
/* Set to skip the next plan. */
bool io_plan_nodebug;
if (!ready->plan.next) {
/* Call finish function immediately. */
if (ready->finish) {
- errno = ready->plan.u.close.saved_errno;
+ errno = ready->plan.u1.s;
ready->finish(ready, ready->finish_arg);
ready->finish = NULL;
}
case 1: /* Done: get next plan. */
if (timeout_active(conn))
backend_del_timeout(conn);
+ /* In case they call io_duplex, clear our poll flags so
+ * both sides don't seem to be both doing read or write
+ * (See assert(!mask || pfd->events != mask) in poll.c) */
+ conn->plan.pollflag = 0;
conn->plan.next(conn, conn->plan.next_arg);
break;
default:
void (*init)(int fd, void *arg),
void *arg)
{
- struct io_listener *l = malloc(sizeof(*l));
+ struct io_listener *l = io_alloc.alloc(sizeof(*l));
if (!l)
return NULL;
l->init = init;
l->arg = arg;
if (!add_listener(l)) {
- free(l);
+ io_alloc.free(l);
return NULL;
}
return l;
{
close(l->fd.fd);
del_listener(l);
- free(l);
+ io_alloc.free(l);
}
struct io_conn *io_new_conn_(int fd, struct io_plan plan)
{
- struct io_conn *conn = malloc(sizeof(*conn));
+ struct io_conn *conn = io_alloc.alloc(sizeof(*conn));
io_plan_debug_again();
conn->duplex = NULL;
conn->timeout = NULL;
if (!add_conn(conn)) {
- free(conn);
+ io_alloc.free(conn);
return NULL;
}
return conn;
assert(!old->duplex);
- conn = malloc(sizeof(*conn));
+ conn = io_alloc.alloc(sizeof(*conn));
if (!conn)
return NULL;
conn->finish_arg = NULL;
conn->timeout = NULL;
if (!add_duplex(conn)) {
- free(conn);
+ io_alloc.free(conn);
return NULL;
}
old->duplex = conn;
assert(cb);
if (!conn->timeout) {
- conn->timeout = malloc(sizeof(*conn->timeout));
+ conn->timeout = io_alloc.alloc(sizeof(*conn->timeout));
if (!conn->timeout)
return false;
} else
/* Returns true if we're finished. */
static int do_write(int fd, struct io_plan *plan)
{
- ssize_t ret = write(fd, plan->u.write.buf, plan->u.write.len);
+ ssize_t ret = write(fd, plan->u1.cp, plan->u2.s);
if (ret < 0)
return io_debug_io(-1);
- plan->u.write.buf += ret;
- plan->u.write.len -= ret;
- return io_debug_io(plan->u.write.len == 0);
+ plan->u1.cp += ret;
+ plan->u2.s -= ret;
+ return io_debug_io(plan->u2.s == 0);
}
/* Queue some data to be written. */
struct io_plan plan;
assert(cb);
- plan.u.write.buf = data;
- plan.u.write.len = len;
+ plan.u1.const_vp = data;
+ plan.u2.s = len;
plan.io = do_write;
plan.next = cb;
plan.next_arg = arg;
static int do_read(int fd, struct io_plan *plan)
{
- ssize_t ret = read(fd, plan->u.read.buf, plan->u.read.len);
+ ssize_t ret = read(fd, plan->u1.cp, plan->u2.s);
if (ret <= 0)
return io_debug_io(-1);
- plan->u.read.buf += ret;
- plan->u.read.len -= ret;
- return io_debug_io(plan->u.read.len == 0);
+ plan->u1.cp += ret;
+ plan->u2.s -= ret;
+ return io_debug_io(plan->u2.s == 0);
}
/* Queue a request to read into a buffer. */
struct io_plan plan;
assert(cb);
- plan.u.read.buf = data;
- plan.u.read.len = len;
+ plan.u1.cp = data;
+ plan.u2.s = len;
plan.io = do_read;
plan.next = cb;
plan.next_arg = arg;
static int do_read_partial(int fd, struct io_plan *plan)
{
- ssize_t ret = read(fd, plan->u.readpart.buf, *plan->u.readpart.lenp);
+ ssize_t ret = read(fd, plan->u1.cp, *(size_t *)plan->u2.vp);
if (ret <= 0)
return io_debug_io(-1);
- *plan->u.readpart.lenp = ret;
+ *(size_t *)plan->u2.vp = ret;
return io_debug_io(1);
}
struct io_plan plan;
assert(cb);
- plan.u.readpart.buf = data;
- plan.u.readpart.lenp = len;
+ plan.u1.cp = data;
+ plan.u2.vp = len;
plan.io = do_read_partial;
plan.next = cb;
plan.next_arg = arg;
static int do_write_partial(int fd, struct io_plan *plan)
{
- ssize_t ret = write(fd, plan->u.writepart.buf, *plan->u.writepart.lenp);
+ ssize_t ret = write(fd, plan->u1.cp, *(size_t *)plan->u2.vp);
if (ret < 0)
return io_debug_io(-1);
- *plan->u.writepart.lenp = ret;
+ *(size_t *)plan->u2.vp = ret;
return io_debug_io(1);
}
struct io_plan plan;
assert(cb);
- plan.u.writepart.buf = data;
- plan.u.writepart.lenp = len;
+ plan.u1.const_vp = data;
+ plan.u2.vp = len;
plan.io = do_write_partial;
plan.next = cb;
plan.next_arg = arg;
if (err == 0) {
/* Restore blocking if it was initially. */
- fcntl(fd, F_SETFD, plan->u.len_len.len1);
+ fcntl(fd, F_SETFL, plan->u1.s);
return 1;
}
return 0;
plan.next_arg = arg;
/* Save old flags, set nonblock if not already. */
- plan.u.len_len.len1 = fcntl(fd, F_GETFD);
- fcntl(fd, F_SETFD, plan.u.len_len.len1 | O_NONBLOCK);
+ plan.u1.s = fcntl(fd, F_GETFL);
+ fcntl(fd, F_SETFL, plan.u1.s | O_NONBLOCK);
/* Immediate connect can happen. */
if (connect(fd, addr->ai_addr, addr->ai_addrlen) == 0) {
return plan;
}
+bool io_is_idle(const struct io_conn *conn)
+{
+ return conn->plan.io == NULL;
+}
+
void io_wake_(struct io_conn *conn, struct io_plan plan)
{
void io_ready(struct io_conn *conn)
{
+ /* Beware io_close_other! */
+ if (!conn->plan.next)
+ return;
+
set_current(conn);
switch (conn->plan.io(conn->fd.fd, &conn->plan)) {
case -1: /* Failure means a new plan: close up. */
case 1: /* Done: get next plan. */
if (timeout_active(conn))
backend_del_timeout(conn);
+ /* In case they call io_duplex, clear our poll flags so
+ * both sides don't seem to be both doing read or write
+ * (See assert(!mask || pfd->events != mask) in poll.c) */
+ conn->plan.pollflag = 0;
conn->plan = conn->plan.next(conn, conn->plan.next_arg);
backend_plan_changed(conn);
}
plan.pollflag = 0;
/* This means we're closing. */
plan.next = NULL;
- plan.u.close.saved_errno = errno;
+ plan.u1.s = errno;
return plan;
}
return io_close();
}
+void io_close_other(struct io_conn *conn)
+{
+ conn->plan = io_close_();
+ backend_plan_changed(conn);
+}
+
/* Exit the loop, returning this (non-NULL) arg. */
struct io_plan io_break_(void *ret, struct io_plan plan)
{
return plan;
}
+
+int io_conn_fd(const struct io_conn *conn)
+{
+ return conn->fd.fd;
+}
+
+void io_set_alloc(void *(*allocfn)(size_t size),
+ void *(*reallocfn)(void *ptr, size_t size),
+ void (*freefn)(void *ptr))
+{
+ io_alloc.alloc = allocfn;
+ io_alloc.realloc = reallocfn;
+ io_alloc.free = freefn;
+}