X-Git-Url: http://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Fio%2Fio.c;h=c5c9bee0daf0329f433eec930058bfd20905f96c;hp=c19b25efe8d12acbe6c5ecc669d01ff54c4801c2;hb=1966714494a5de39b2ee944fcc01f333c1741dbd;hpb=cef578da77d657701616161f3c3bf826186a024e diff --git a/ccan/io/io.c b/ccan/io/io.c index c19b25ef..c5c9bee0 100644 --- a/ccan/io/io.c +++ b/ccan/io/io.c @@ -8,16 +8,20 @@ #include #include #include -#include +#include +#include +#include void *io_loop_return; -struct io_listener *io_new_listener_(int fd, - void (*init)(int fd, void *arg), +struct io_plan io_conn_freed; + +struct io_listener *io_new_listener_(const tal_t *ctx, int fd, + struct io_plan *(*init)(struct io_conn *, + void *), void *arg) { - struct io_listener *l = malloc(sizeof(*l)); - + struct io_listener *l = tal(ctx, struct io_listener); if (!l) return NULL; @@ -25,272 +29,540 @@ struct io_listener *io_new_listener_(int fd, l->fd.fd = fd; l->init = init; l->arg = arg; - if (!add_listener(l)) { - free(l); - return NULL; - } + l->ctx = ctx; + if (!add_listener(l)) + return tal_free(l); return l; } void io_close_listener(struct io_listener *l) { - close(l->fd.fd); - del_listener(l); - free(l); + tal_free(l); +} + +static struct io_plan *io_never_called(struct io_conn *conn, void *arg) +{ + abort(); } -struct io_conn *io_new_conn_(int fd, - struct io_plan plan, - void (*finish)(struct io_conn *, void *), +/* Returns false if conn was freed. */ +static bool next_plan(struct io_conn *conn, struct io_plan *plan) +{ + struct io_plan *(*next)(struct io_conn *, void *arg); + + next = plan->next; + + plan->status = IO_UNSET; + plan->io = NULL; + plan->next = io_never_called; + + plan = next(conn, plan->next_arg); + + if (plan == &io_conn_freed) + return false; + + assert(plan == &conn->plan[plan->dir]); + assert(conn->plan[IO_IN].status != IO_UNSET + || conn->plan[IO_OUT].status != IO_UNSET); + + backend_new_plan(conn); + return true; +} + +bool io_fd_block(int fd, bool block) +{ + int flags = fcntl(fd, F_GETFL); + + if (flags == -1) + return false; + + if (block) + flags &= ~O_NONBLOCK; + else + flags |= O_NONBLOCK; + + return fcntl(fd, F_SETFL, flags) != -1; +} + +struct io_conn *io_new_conn_(const tal_t *ctx, int fd, + struct io_plan *(*init)(struct io_conn *, void *), void *arg) { - struct io_conn *conn = malloc(sizeof(*conn)); + struct io_conn *conn = tal(ctx, struct io_conn); if (!conn) return NULL; conn->fd.listener = false; conn->fd.fd = fd; - conn->plan = plan; - conn->finish = finish; - conn->finish_arg = arg; - conn->duplex = NULL; - conn->timeout = NULL; - if (!add_conn(conn)) { - free(conn); + conn->finish = NULL; + conn->finish_arg = NULL; + list_node_init(&conn->always); + + if (!add_conn(conn)) + return tal_free(conn); + + /* Keep our I/O async. */ + io_fd_block(fd, false); + + /* So we can get back from plan -> conn later */ + conn->plan[IO_OUT].dir = IO_OUT; + conn->plan[IO_IN].dir = IO_IN; + + /* We start with out doing nothing, and in doing our init. 
*/ + conn->plan[IO_OUT].status = IO_UNSET; + + conn->plan[IO_IN].next = init; + conn->plan[IO_IN].next_arg = arg; + if (!next_plan(conn, &conn->plan[IO_IN])) return NULL; - } + return conn; } -struct io_conn *io_duplex_(struct io_conn *old, - struct io_plan plan, - void (*finish)(struct io_conn *, void *), - void *arg) +void io_set_finish_(struct io_conn *conn, + void (*finish)(struct io_conn *, void *), + void *arg) { - struct io_conn *conn; + conn->finish = finish; + conn->finish_arg = arg; +} - assert(!old->duplex); +struct io_plan_arg *io_plan_arg(struct io_conn *conn, enum io_direction dir) +{ + assert(conn->plan[dir].status == IO_UNSET); - conn = malloc(sizeof(*conn)); - if (!conn) - return NULL; + conn->plan[dir].status = IO_POLLING_NOTSTARTED; + return &conn->plan[dir].arg; +} - conn->fd.listener = false; - conn->fd.fd = old->fd.fd; - conn->plan = plan; - conn->duplex = old; - conn->finish = finish; - conn->finish_arg = arg; - conn->timeout = NULL; - if (!add_duplex(conn)) { - free(conn); - return NULL; - } - old->duplex = conn; - return conn; +static struct io_plan *set_always(struct io_conn *conn, + enum io_direction dir, + struct io_plan *(*next)(struct io_conn *, + void *), + void *arg) +{ + struct io_plan *plan = &conn->plan[dir]; + + plan->status = IO_ALWAYS; + backend_new_always(conn); + return io_set_plan(conn, dir, NULL, next, arg); } -bool io_timeout_(struct io_conn *conn, struct timespec ts, - struct io_plan (*cb)(struct io_conn *, void *), void *arg) +static struct io_plan *io_always_dir(struct io_conn *conn, + enum io_direction dir, + struct io_plan *(*next)(struct io_conn *, + void *), + void *arg) { - assert(cb); + return set_always(conn, dir, next, arg); +} - if (!conn->timeout) { - conn->timeout = malloc(sizeof(*conn->timeout)); - if (!conn->timeout) - return false; - } else - assert(!timeout_active(conn)); +struct io_plan *io_always_(struct io_conn *conn, + struct io_plan *(*next)(struct io_conn *, void *), + void *arg) +{ + return io_always_dir(conn, IO_IN, next, arg); +} - conn->timeout->next = cb; - conn->timeout->next_arg = arg; - backend_add_timeout(conn, ts); - return true; +struct io_plan *io_out_always_(struct io_conn *conn, + struct io_plan *(*next)(struct io_conn *, + void *), + void *arg) +{ + return io_always_dir(conn, IO_OUT, next, arg); } -/* Returns true if we're finished. */ -static bool do_write(int fd, struct io_plan *plan) +static int do_write(int fd, struct io_plan_arg *arg) { - ssize_t ret = write(fd, plan->u.write.buf, plan->u.write.len); - if (ret < 0) { - /* Override next function to close us. */ - plan->next = io_close; - return true; - } + ssize_t ret = write(fd, arg->u1.cp, arg->u2.s); + if (ret < 0) + return -1; - plan->u.write.buf += ret; - plan->u.write.len -= ret; - return (plan->u.write.len == 0); + arg->u1.cp += ret; + arg->u2.s -= ret; + return arg->u2.s == 0; } /* Queue some data to be written. 
*/ -struct io_plan io_write_(const void *data, size_t len, - struct io_plan (*cb)(struct io_conn *, void *), - void *arg) -{ - struct io_plan plan; - - assert(cb); - plan.u.write.buf = data; - plan.u.write.len = len; - plan.io = do_write; - plan.next = cb; - plan.next_arg = arg; - plan.pollflag = POLLOUT; - return plan; +struct io_plan *io_write_(struct io_conn *conn, const void *data, size_t len, + struct io_plan *(*next)(struct io_conn *, void *), + void *next_arg) +{ + struct io_plan_arg *arg = io_plan_arg(conn, IO_OUT); + + if (len == 0) + return set_always(conn, IO_OUT, next, next_arg); + + arg->u1.const_vp = data; + arg->u2.s = len; + + return io_set_plan(conn, IO_OUT, do_write, next, next_arg); } -static bool do_read(int fd, struct io_plan *plan) +static int do_read(int fd, struct io_plan_arg *arg) { - ssize_t ret = read(fd, plan->u.read.buf, plan->u.read.len); + ssize_t ret = read(fd, arg->u1.cp, arg->u2.s); if (ret <= 0) { - /* Override next function to close us. */ - plan->next = io_close; - return true; + /* Errno isn't set if we hit EOF, so set it to distinct value */ + if (ret == 0) + errno = 0; + return -1; } - plan->u.read.buf += ret; - plan->u.read.len -= ret; - return (plan->u.read.len == 0); + arg->u1.cp += ret; + arg->u2.s -= ret; + return arg->u2.s == 0; } /* Queue a request to read into a buffer. */ -struct io_plan io_read_(void *data, size_t len, - struct io_plan (*cb)(struct io_conn *, void *), - void *arg) -{ - struct io_plan plan; - - assert(cb); - plan.u.read.buf = data; - plan.u.read.len = len; - plan.io = do_read; - plan.next = cb; - plan.next_arg = arg; - plan.pollflag = POLLIN; - return plan; +struct io_plan *io_read_(struct io_conn *conn, + void *data, size_t len, + struct io_plan *(*next)(struct io_conn *, void *), + void *next_arg) +{ + struct io_plan_arg *arg = io_plan_arg(conn, IO_IN); + + if (len == 0) + return set_always(conn, IO_IN, next, next_arg); + + arg->u1.cp = data; + arg->u2.s = len; + + return io_set_plan(conn, IO_IN, do_read, next, next_arg); } -static bool do_read_partial(int fd, struct io_plan *plan) +static int do_read_partial(int fd, struct io_plan_arg *arg) { - ssize_t ret = read(fd, plan->u.readpart.buf, *plan->u.readpart.lenp); + ssize_t ret = read(fd, arg->u1.cp, *(size_t *)arg->u2.vp); if (ret <= 0) { - /* Override next function to close us. */ - plan->next = io_close; - return true; + /* Errno isn't set if we hit EOF, so set it to distinct value */ + if (ret == 0) + errno = 0; + return -1; } - *plan->u.readpart.lenp = ret; - return true; + *(size_t *)arg->u2.vp = ret; + return 1; } /* Queue a partial request to read into a buffer. */ -struct io_plan io_read_partial_(void *data, size_t *len, - struct io_plan (*cb)(struct io_conn *, void *), - void *arg) +struct io_plan *io_read_partial_(struct io_conn *conn, + void *data, size_t maxlen, size_t *len, + struct io_plan *(*next)(struct io_conn *, + void *), + void *next_arg) { - struct io_plan plan; + struct io_plan_arg *arg = io_plan_arg(conn, IO_IN); - assert(cb); - plan.u.readpart.buf = data; - plan.u.readpart.lenp = len; - plan.io = do_read_partial; - plan.next = cb; - plan.next_arg = arg; - plan.pollflag = POLLIN; + if (maxlen == 0) + return set_always(conn, IO_IN, next, next_arg); - return plan; + arg->u1.cp = data; + /* We store the max len in here temporarily. 
*/ + *len = maxlen; + arg->u2.vp = len; + + return io_set_plan(conn, IO_IN, do_read_partial, next, next_arg); } -static bool do_write_partial(int fd, struct io_plan *plan) +static int do_write_partial(int fd, struct io_plan_arg *arg) { - ssize_t ret = write(fd, plan->u.writepart.buf, *plan->u.writepart.lenp); - if (ret < 0) { - /* Override next function to close us. */ - plan->next = io_close; - return true; - } + ssize_t ret = write(fd, arg->u1.cp, *(size_t *)arg->u2.vp); + if (ret < 0) + return -1; - *plan->u.writepart.lenp = ret; - return true; + *(size_t *)arg->u2.vp = ret; + return 1; } /* Queue a partial write request. */ -struct io_plan io_write_partial_(const void *data, size_t *len, - struct io_plan (*cb)(struct io_conn*, void *), - void *arg) +struct io_plan *io_write_partial_(struct io_conn *conn, + const void *data, size_t maxlen, size_t *len, + struct io_plan *(*next)(struct io_conn *, + void*), + void *next_arg) { - struct io_plan plan; + struct io_plan_arg *arg = io_plan_arg(conn, IO_OUT); - assert(cb); - plan.u.writepart.buf = data; - plan.u.writepart.lenp = len; - plan.io = do_write_partial; - plan.next = cb; - plan.next_arg = arg; - plan.pollflag = POLLOUT; + if (maxlen == 0) + return set_always(conn, IO_OUT, next, next_arg); - return plan; + arg->u1.const_vp = data; + /* We store the max len in here temporarily. */ + *len = maxlen; + arg->u2.vp = len; + + return io_set_plan(conn, IO_OUT, do_write_partial, next, next_arg); } -struct io_plan io_idle(void) +static int do_connect(int fd, struct io_plan_arg *arg) { - struct io_plan plan; + int err, ret; + socklen_t len = sizeof(err); - plan.pollflag = 0; - plan.io = NULL; - /* Never called (overridded by io_wake), but NULL means closing */ - plan.next = io_close; + /* Has async connect finished? */ + ret = getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len); + if (ret < 0) + return -1; - return plan; + if (err == 0) { + return 1; + } else if (err == EINPROGRESS) + return 0; + + errno = err; + return -1; +} + +struct io_plan *io_connect_(struct io_conn *conn, const struct addrinfo *addr, + struct io_plan *(*next)(struct io_conn *, void *), + void *next_arg) +{ + int fd = io_conn_fd(conn); + + /* We don't actually need the arg, but we need it polling. */ + io_plan_arg(conn, IO_OUT); + + /* Note that io_new_conn() will make fd O_NONBLOCK */ + + /* Immediate connect can happen. 
*/ + if (connect(fd, addr->ai_addr, addr->ai_addrlen) == 0) + return set_always(conn, IO_OUT, next, next_arg); + + if (errno != EINPROGRESS) + return io_close(conn); + + return io_set_plan(conn, IO_OUT, do_connect, next, next_arg); +} + +static struct io_plan *io_wait_dir(struct io_conn *conn, + const void *wait, + enum io_direction dir, + struct io_plan *(*next)(struct io_conn *, + void *), + void *next_arg) +{ + struct io_plan_arg *arg = io_plan_arg(conn, dir); + arg->u1.const_vp = wait; + + conn->plan[dir].status = IO_WAITING; + + return io_set_plan(conn, dir, NULL, next, next_arg); +} + +struct io_plan *io_wait_(struct io_conn *conn, + const void *wait, + struct io_plan *(*next)(struct io_conn *, void *), + void *next_arg) +{ + return io_wait_dir(conn, wait, IO_IN, next, next_arg); +} + +struct io_plan *io_out_wait_(struct io_conn *conn, + const void *wait, + struct io_plan *(*next)(struct io_conn *, void *), + void *next_arg) +{ + return io_wait_dir(conn, wait, IO_OUT, next, next_arg); +} + +void io_wake(const void *wait) +{ + backend_wake(wait); } -void io_wake(struct io_conn *conn, struct io_plan plan) +/* Returns false if this should not be touched (eg. freed). */ +static bool do_plan(struct io_conn *conn, struct io_plan *plan, + bool idle_on_epipe) +{ + /* We shouldn't have polled for this event if this wasn't true! */ + assert(plan->status == IO_POLLING_NOTSTARTED + || plan->status == IO_POLLING_STARTED); + + switch (plan->io(conn->fd.fd, &plan->arg)) { + case -1: + if (errno == EPIPE && idle_on_epipe) { + plan->status = IO_UNSET; + backend_new_plan(conn); + return false; + } + io_close(conn); + return false; + case 0: + plan->status = IO_POLLING_STARTED; + return true; + case 1: + return next_plan(conn, plan); + default: + /* IO should only return -1, 0 or 1 */ + abort(); + } +} +void io_ready(struct io_conn *conn, int pollflags) { - /* It might be closing, but we haven't called its finish() yet. */ - if (!conn->plan.next) - return; - /* It was idle, right? */ - assert(!conn->plan.io); - conn->plan = plan; - backend_wakeup(conn); + if (pollflags & POLLIN) + if (!do_plan(conn, &conn->plan[IO_IN], false)) + return; + + if (pollflags & POLLOUT) + /* If we're writing to a closed pipe, we need to wait for + * read to fail if we're duplex: we want to drain it! */ + do_plan(conn, &conn->plan[IO_OUT], + conn->plan[IO_IN].status == IO_POLLING_NOTSTARTED + || conn->plan[IO_IN].status == IO_POLLING_STARTED); } -static struct io_plan do_next(struct io_conn *conn) +void io_do_always(struct io_conn *conn) { - if (timeout_active(conn)) - backend_del_timeout(conn); - return conn->plan.next(conn, conn->plan.next_arg); + /* There's a corner case where the in next_plan wakes up the + * out, placing it in IO_ALWAYS and we end up processing it immediately, + * only to leave it in the always list. + * + * Yet we can't just process one, in case they are both supposed + * to be done, so grab state beforehand. 
+ */ + bool always_out = (conn->plan[IO_OUT].status == IO_ALWAYS); + + if (conn->plan[IO_IN].status == IO_ALWAYS) + if (!next_plan(conn, &conn->plan[IO_IN])) + return; + + if (always_out) { + /* You can't *unalways* a conn (except by freeing, in which + * case next_plan() returned false */ + assert(conn->plan[IO_OUT].status == IO_ALWAYS); + next_plan(conn, &conn->plan[IO_OUT]); + } } -struct io_plan do_ready(struct io_conn *conn) +void io_do_wakeup(struct io_conn *conn, enum io_direction dir) { - if (conn->plan.io(conn->fd.fd, &conn->plan)) - return do_next(conn); + struct io_plan *plan = &conn->plan[dir]; + + assert(plan->status == IO_WAITING); - return conn->plan; + set_always(conn, dir, plan->next, plan->next_arg); } -/* Useful next functions. */ /* Close the connection, we're done. */ -struct io_plan io_close(struct io_conn *conn, void *arg) +struct io_plan *io_close(struct io_conn *conn) { - struct io_plan plan; + tal_free(conn); + return &io_conn_freed; +} - plan.pollflag = 0; - /* This means we're closing. */ - plan.next = NULL; +struct io_plan *io_close_cb(struct io_conn *conn, void *next_arg) +{ + return io_close(conn); +} - return plan; +struct io_plan *io_close_taken_fd(struct io_conn *conn) +{ + io_fd_block(conn->fd.fd, true); + + cleanup_conn_without_close(conn); + return io_close(conn); } /* Exit the loop, returning this (non-NULL) arg. */ -struct io_plan io_break(void *ret, struct io_plan plan) +void io_break(const void *ret) { assert(ret); - io_loop_return = ret; + io_loop_return = (void *)ret; +} + +struct io_plan *io_never(struct io_conn *conn, void *unused) +{ + return io_always(conn, io_never_called, NULL); +} + +int io_conn_fd(const struct io_conn *conn) +{ + return conn->fd.fd; +} + +struct io_plan *io_duplex(struct io_conn *conn, + struct io_plan *in_plan, struct io_plan *out_plan) +{ + assert(conn == container_of(in_plan, struct io_conn, plan[IO_IN])); + /* in_plan must be conn->plan[IO_IN], out_plan must be [IO_OUT] */ + assert(out_plan == in_plan + 1); + return in_plan; +} + +struct io_plan *io_halfclose(struct io_conn *conn) +{ + /* Both unset? OK. */ + if (conn->plan[IO_IN].status == IO_UNSET + && conn->plan[IO_OUT].status == IO_UNSET) + return io_close(conn); + + /* We leave this unset then. */ + if (conn->plan[IO_IN].status == IO_UNSET) + return &conn->plan[IO_IN]; + else + return &conn->plan[IO_OUT]; +} + +struct io_plan *io_set_plan(struct io_conn *conn, enum io_direction dir, + int (*io)(int fd, struct io_plan_arg *arg), + struct io_plan *(*next)(struct io_conn *, void *), + void *next_arg) +{ + struct io_plan *plan = &conn->plan[dir]; + + plan->io = io; + plan->next = next; + plan->next_arg = next_arg; + assert(next != NULL); return plan; } + +bool io_plan_in_started(const struct io_conn *conn) +{ + return conn->plan[IO_IN].status == IO_POLLING_STARTED; +} + +bool io_plan_out_started(const struct io_conn *conn) +{ + return conn->plan[IO_OUT].status == IO_POLLING_STARTED; +} + +bool io_flush_sync(struct io_conn *conn) +{ + struct io_plan *plan = &conn->plan[IO_OUT]; + bool ok; + + /* Not writing? Nothing to do. */ + if (plan->status != IO_POLLING_STARTED + && plan->status != IO_POLLING_NOTSTARTED) + return true; + + /* Synchronous please. */ + io_fd_block(io_conn_fd(conn), true); + +again: + switch (plan->io(conn->fd.fd, &plan->arg)) { + case -1: + ok = false; + break; + /* Incomplete, try again. */ + case 0: + plan->status = IO_POLLING_STARTED; + goto again; + case 1: + ok = true; + /* In case they come back. 
*/ + set_always(conn, IO_OUT, plan->next, plan->next_arg); + break; + default: + /* IO should only return -1, 0 or 1 */ + abort(); + } + + io_fd_block(io_conn_fd(conn), false); + return ok; +}
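
Usage note (not part of the patch): the diff above converts ccan/io from callbacks that return a struct io_plan by value to callbacks that return a pointer into the conn's own plan array, with tal-based allocation and a per-connection init callback. Below is a minimal sketch of how the new entry points chain together. It uses the `_`-suffixed functions whose signatures appear verbatim in this diff (the io.h macros wrap them with type-safe callbacks); struct echo, the callback names, and the zero-argument io_loop() call are illustrative assumptions, not something this patch defines.

	#include <ccan/io/io.h>
	#include <ccan/tal/tal.h>
	#include <sys/socket.h>
	#include <unistd.h>
	#include <stdio.h>

	/* Hypothetical per-connection state for this sketch. */
	struct echo {
		char buf[5];
	};

	static struct io_plan *write_done(struct io_conn *conn, void *arg)
	{
		/* Reply fully written: close (and free) the connection. */
		return io_close(conn);
	}

	static struct io_plan *read_done(struct io_conn *conn, void *arg)
	{
		struct echo *e = arg;

		/* Buffer filled: queue it back out, then write_done. */
		return io_write_(conn, e->buf, sizeof(e->buf), write_done, e);
	}

	static struct io_plan *init_conn(struct io_conn *conn, void *arg)
	{
		struct echo *e = arg;

		/* First plan for this conn: fill buf, then read_done. */
		return io_read_(conn, e->buf, sizeof(e->buf), read_done, e);
	}

	static void conn_gone(struct io_conn *conn, void *arg)
	{
		/* Called when the conn is freed (see io_set_finish_). */
		printf("connection finished\n");
	}

	int main(void)
	{
		int fds[2];
		char back[5];
		struct echo *e = tal(NULL, struct echo);
		struct io_conn *conn;

		/* A local socketpair stands in for a network socket. */
		if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0)
			return 1;
		if (write(fds[1], "hello", 5) != 5)
			return 1;

		conn = io_new_conn_(NULL, fds[0], init_conn, e);
		io_set_finish_(conn, conn_gone, NULL);

		/* Assumption: this era's io_loop() takes no arguments and
		 * returns once io_break() is called or no conns remain. */
		io_loop();

		/* The five bytes were echoed back across the socketpair. */
		if (read(fds[1], back, sizeof(back)) == 5)
			printf("echoed: %.5s\n", back);

		tal_free(e);
		return 0;
	}

Every callback returns the pointer handed back by io_read_()/io_write_()/io_close(); that is what lets next_plan() assert the plan really belongs to this conn, and the &io_conn_freed sentinel from io_close() tells the core the conn is already gone.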
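
The old io_duplex_() created a second struct io_conn sharing the fd; the new io_duplex() simply checks that the caller set up independent IN and OUT plans on one conn, and io_halfclose() lets each direction finish without tearing down the other. A sketch under the same assumptions as above (struct both and the callback names are invented for illustration):

	#include <ccan/io/io.h>
	#include <ccan/tal/tal.h>
	#include <sys/socket.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdio.h>

	/* Hypothetical state: send a greeting while reading the peer's. */
	struct both {
		char theirs[5];
		char ours[5];
	};

	static struct io_plan *in_done(struct io_conn *conn, void *arg)
	{
		/* Reading side finished; close only once OUT is done too. */
		return io_halfclose(conn);
	}

	static struct io_plan *out_done(struct io_conn *conn, void *arg)
	{
		/* Writing side finished; close only once IN is done too. */
		return io_halfclose(conn);
	}

	static struct io_plan *init_both(struct io_conn *conn, void *arg)
	{
		struct both *b = arg;

		/* Run a read plan and a write plan on the same conn at once;
		 * io_duplex() asserts both plans belong to this conn and
		 * returns the IN plan. */
		return io_duplex(conn,
				 io_read_(conn, b->theirs, sizeof(b->theirs),
					  in_done, b),
				 io_write_(conn, b->ours, sizeof(b->ours),
					   out_done, b));
	}

	int main(void)
	{
		int fds[2];
		struct both *b = tal(NULL, struct both);

		memcpy(b->ours, "hello", 5);
		if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0)
			return 1;

		/* The peer end sends 5 bytes; the conn's own write can
		 * complete into the socket buffer meanwhile. */
		if (write(fds[1], "world", 5) != 5)
			return 1;

		io_new_conn_(NULL, fds[0], init_both, b);
		io_loop();	/* same io_loop() caveat as above */

		printf("received: %.5s\n", b->theirs);
		tal_free(b);
		return 0;
	}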
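
Finally, io_idle() and the old io_wake(conn, plan) are replaced by io_wait_()/io_out_wait_() on an arbitrary address plus io_wake(addr), which resumes every conn parked on that address. A sketch with invented names (wakeup_token and the two init callbacks) and the same io_loop() assumption; the wake-up path relies on the backend_wake()/io_do_wakeup() behaviour shown in this diff:

	#include <ccan/io/io.h>
	#include <ccan/tal/tal.h>
	#include <sys/socket.h>
	#include <unistd.h>
	#include <stdio.h>

	/* An arbitrary address used purely as a wakeup token. */
	static int wakeup_token;

	static struct io_plan *say_done(struct io_conn *conn, void *arg)
	{
		return io_close(conn);
	}

	static struct io_plan *woken(struct io_conn *conn, void *arg)
	{
		/* We were woken: now actually write the message. */
		return io_write_(conn, "ping", 4, say_done, NULL);
	}

	static struct io_plan *init_waiter(struct io_conn *conn, void *arg)
	{
		/* Park until someone calls io_wake(&wakeup_token). */
		return io_wait_(conn, &wakeup_token, woken, NULL);
	}

	static struct io_plan *got_byte(struct io_conn *conn, void *arg)
	{
		/* Wake every conn waiting on &wakeup_token, then finish. */
		io_wake(&wakeup_token);
		return io_close(conn);
	}

	static struct io_plan *init_trigger(struct io_conn *conn, void *arg)
	{
		/* One-byte buffer owned by (and freed with) the conn. */
		char *byte = tal(conn, char);
		return io_read_(conn, byte, 1, got_byte, NULL);
	}

	int main(void)
	{
		int waiter[2], trigger[2];
		char buf[4];

		if (socketpair(AF_UNIX, SOCK_STREAM, 0, waiter) != 0
		    || socketpair(AF_UNIX, SOCK_STREAM, 0, trigger) != 0)
			return 1;

		io_new_conn_(NULL, waiter[0], init_waiter, NULL);
		io_new_conn_(NULL, trigger[0], init_trigger, NULL);

		/* Nothing happens until the trigger fd becomes readable. */
		if (write(trigger[1], "x", 1) != 1)
			return 1;

		io_loop();	/* same io_loop() caveat as above */

		if (read(waiter[1], buf, sizeof(buf)) == 4)
			printf("waiter wrote: %.4s\n", buf);
		return 0;
	}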