X-Git-Url: http://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Fio%2Fio.c;h=90039e1e6960155a87862bbc1e86f9cbc987f35f;hp=76f1f441740476fc52dbf5dc1fc378bc16e16be1;hb=cdf62dce7077a9f9a818edbb67d31d033cbb73c6;hpb=b85c47bb81a9078afc5ddc51448560187348bbbf diff --git a/ccan/io/io.c b/ccan/io/io.c index 76f1f441..90039e1e 100644 --- a/ccan/io/io.c +++ b/ccan/io/io.c @@ -8,12 +8,15 @@ #include #include #include -#include #include #include void *io_loop_return; +struct io_alloc io_alloc = { + malloc, realloc, free +}; + #ifdef DEBUG /* Set to skip the next plan. */ bool io_plan_nodebug; @@ -50,7 +53,7 @@ struct io_plan io_debug(struct io_plan plan) if (!ready->plan.next) { /* Call finish function immediately. */ if (ready->finish) { - errno = ready->plan.u.close.saved_errno; + errno = ready->plan.u1.s; ready->finish(ready, ready->finish_arg); ready->finish = NULL; } @@ -90,6 +93,10 @@ int io_debug_io(int ret) case 1: /* Done: get next plan. */ if (timeout_active(conn)) backend_del_timeout(conn); + /* In case they call io_duplex, clear our poll flags so + * both sides don't seem to be both doing read or write + * (See assert(!mask || pfd->events != mask) in poll.c) */ + conn->plan.pollflag = 0; conn->plan.next(conn, conn->plan.next_arg); break; default: @@ -125,7 +132,7 @@ struct io_listener *io_new_listener_(int fd, void (*init)(int fd, void *arg), void *arg) { - struct io_listener *l = malloc(sizeof(*l)); + struct io_listener *l = io_alloc.alloc(sizeof(*l)); if (!l) return NULL; @@ -135,7 +142,7 @@ struct io_listener *io_new_listener_(int fd, l->init = init; l->arg = arg; if (!add_listener(l)) { - free(l); + io_alloc.free(l); return NULL; } return l; @@ -145,12 +152,12 @@ void io_close_listener(struct io_listener *l) { close(l->fd.fd); del_listener(l); - free(l); + io_alloc.free(l); } struct io_conn *io_new_conn_(int fd, struct io_plan plan) { - struct io_conn *conn = malloc(sizeof(*conn)); + struct io_conn *conn = io_alloc.alloc(sizeof(*conn)); io_plan_debug_again(); @@ -165,7 +172,7 @@ struct io_conn *io_new_conn_(int fd, struct io_plan plan) conn->duplex = NULL; conn->timeout = NULL; if (!add_conn(conn)) { - free(conn); + io_alloc.free(conn); return NULL; } return conn; @@ -187,7 +194,7 @@ struct io_conn *io_duplex_(struct io_conn *old, struct io_plan plan) assert(!old->duplex); - conn = malloc(sizeof(*conn)); + conn = io_alloc.alloc(sizeof(*conn)); if (!conn) return NULL; @@ -199,7 +206,7 @@ struct io_conn *io_duplex_(struct io_conn *old, struct io_plan plan) conn->finish_arg = NULL; conn->timeout = NULL; if (!add_duplex(conn)) { - free(conn); + io_alloc.free(conn); return NULL; } old->duplex = conn; @@ -212,7 +219,7 @@ bool io_timeout_(struct io_conn *conn, struct timespec ts, assert(cb); if (!conn->timeout) { - conn->timeout = malloc(sizeof(*conn->timeout)); + conn->timeout = io_alloc.alloc(sizeof(*conn->timeout)); if (!conn->timeout) return false; } else @@ -224,16 +231,36 @@ bool io_timeout_(struct io_conn *conn, struct timespec ts, return true; } +/* Always done: call the next thing. */ +static int do_always(int fd, struct io_plan *plan) +{ + return 1; +} + +struct io_plan io_always_(struct io_plan (*cb)(struct io_conn *, void *), + void *arg) +{ + struct io_plan plan; + + assert(cb); + plan.io = do_always; + plan.next = cb; + plan.next_arg = arg; + plan.pollflag = POLLALWAYS; + + return plan; +} + /* Returns true if we're finished. 
*/ static int do_write(int fd, struct io_plan *plan) { - ssize_t ret = write(fd, plan->u.write.buf, plan->u.write.len); + ssize_t ret = write(fd, plan->u1.cp, plan->u2.s); if (ret < 0) return io_debug_io(-1); - plan->u.write.buf += ret; - plan->u.write.len -= ret; - return io_debug_io(plan->u.write.len == 0); + plan->u1.cp += ret; + plan->u2.s -= ret; + return io_debug_io(plan->u2.s == 0); } /* Queue some data to be written. */ @@ -244,8 +271,12 @@ struct io_plan io_write_(const void *data, size_t len, struct io_plan plan; assert(cb); - plan.u.write.buf = data; - plan.u.write.len = len; + + if (len == 0) + return io_always_(cb, arg); + + plan.u1.const_vp = data; + plan.u2.s = len; plan.io = do_write; plan.next = cb; plan.next_arg = arg; @@ -256,13 +287,13 @@ struct io_plan io_write_(const void *data, size_t len, static int do_read(int fd, struct io_plan *plan) { - ssize_t ret = read(fd, plan->u.read.buf, plan->u.read.len); + ssize_t ret = read(fd, plan->u1.cp, plan->u2.s); if (ret <= 0) return io_debug_io(-1); - plan->u.read.buf += ret; - plan->u.read.len -= ret; - return io_debug_io(plan->u.read.len == 0); + plan->u1.cp += ret; + plan->u2.s -= ret; + return io_debug_io(plan->u2.s == 0); } /* Queue a request to read into a buffer. */ @@ -273,11 +304,16 @@ struct io_plan io_read_(void *data, size_t len, struct io_plan plan; assert(cb); - plan.u.read.buf = data; - plan.u.read.len = len; + + if (len == 0) + return io_always_(cb, arg); + + plan.u1.cp = data; + plan.u2.s = len; plan.io = do_read; plan.next = cb; plan.next_arg = arg; + plan.pollflag = POLLIN; return plan; @@ -285,11 +321,11 @@ struct io_plan io_read_(void *data, size_t len, static int do_read_partial(int fd, struct io_plan *plan) { - ssize_t ret = read(fd, plan->u.readpart.buf, *plan->u.readpart.lenp); + ssize_t ret = read(fd, plan->u1.cp, *(size_t *)plan->u2.vp); if (ret <= 0) return io_debug_io(-1); - *plan->u.readpart.lenp = ret; + *(size_t *)plan->u2.vp = ret; return io_debug_io(1); } @@ -301,8 +337,12 @@ struct io_plan io_read_partial_(void *data, size_t *len, struct io_plan plan; assert(cb); - plan.u.readpart.buf = data; - plan.u.readpart.lenp = len; + + if (*len == 0) + return io_always_(cb, arg); + + plan.u1.cp = data; + plan.u2.vp = len; plan.io = do_read_partial; plan.next = cb; plan.next_arg = arg; @@ -313,11 +353,11 @@ struct io_plan io_read_partial_(void *data, size_t *len, static int do_write_partial(int fd, struct io_plan *plan) { - ssize_t ret = write(fd, plan->u.writepart.buf, *plan->u.writepart.lenp); + ssize_t ret = write(fd, plan->u1.cp, *(size_t *)plan->u2.vp); if (ret < 0) return io_debug_io(-1); - *plan->u.writepart.lenp = ret; + *(size_t *)plan->u2.vp = ret; return io_debug_io(1); } @@ -329,8 +369,12 @@ struct io_plan io_write_partial_(const void *data, size_t *len, struct io_plan plan; assert(cb); - plan.u.writepart.buf = data; - plan.u.writepart.lenp = len; + + if (*len == 0) + return io_always_(cb, arg); + + plan.u1.const_vp = data; + plan.u2.vp = len; plan.io = do_write_partial; plan.next = cb; plan.next_arg = arg; @@ -356,7 +400,7 @@ static int do_connect(int fd, struct io_plan *plan) if (err == 0) { /* Restore blocking if it was initially. */ - fcntl(fd, F_SETFD, plan->u.len_len.len1); + fcntl(fd, F_SETFD, plan->u1.s); return 1; } return 0; @@ -374,8 +418,8 @@ struct io_plan io_connect_(int fd, const struct addrinfo *addr, plan.next_arg = arg; /* Save old flags, set nonblock if not already. 
*/ - plan.u.len_len.len1 = fcntl(fd, F_GETFD); - fcntl(fd, F_SETFD, plan.u.len_len.len1 | O_NONBLOCK); + plan.u1.s = fcntl(fd, F_GETFD); + fcntl(fd, F_SETFD, plan.u1.s | O_NONBLOCK); /* Immediate connect can happen. */ if (connect(fd, addr->ai_addr, addr->ai_addrlen) == 0) { @@ -404,6 +448,11 @@ struct io_plan io_idle_(void) return plan; } +bool io_is_idle(const struct io_conn *conn) +{ + return conn->plan.io == NULL; +} + void io_wake_(struct io_conn *conn, struct io_plan plan) { @@ -422,6 +471,10 @@ void io_wake_(struct io_conn *conn, struct io_plan plan) void io_ready(struct io_conn *conn) { + /* Beware io_close_other! */ + if (!conn->plan.next) + return; + set_current(conn); switch (conn->plan.io(conn->fd.fd, &conn->plan)) { case -1: /* Failure means a new plan: close up. */ @@ -433,6 +486,10 @@ void io_ready(struct io_conn *conn) case 1: /* Done: get next plan. */ if (timeout_active(conn)) backend_del_timeout(conn); + /* In case they call io_duplex, clear our poll flags so + * both sides don't seem to be both doing read or write + * (See assert(!mask || pfd->events != mask) in poll.c) */ + conn->plan.pollflag = 0; conn->plan = conn->plan.next(conn, conn->plan.next_arg); backend_plan_changed(conn); } @@ -447,7 +504,7 @@ struct io_plan io_close_(void) plan.pollflag = 0; /* This means we're closing. */ plan.next = NULL; - plan.u.close.saved_errno = errno; + plan.u1.s = errno; return plan; } @@ -457,6 +514,12 @@ struct io_plan io_close_cb(struct io_conn *conn, void *arg) return io_close(); } +void io_close_other(struct io_conn *conn) +{ + conn->plan = io_close_(); + backend_plan_changed(conn); +} + /* Exit the loop, returning this (non-NULL) arg. */ struct io_plan io_break_(void *ret, struct io_plan plan) { @@ -467,3 +530,27 @@ struct io_plan io_break_(void *ret, struct io_plan plan) return plan; } + +static struct io_plan io_never_called(struct io_conn *conn, void *arg) +{ + abort(); +} + +struct io_plan io_never(void) +{ + return io_always_(io_never_called, NULL); +} + +int io_conn_fd(const struct io_conn *conn) +{ + return conn->fd.fd; +} + +void io_set_alloc(void *(*allocfn)(size_t size), + void *(*reallocfn)(void *ptr, size_t size), + void (*freefn)(void *ptr)) +{ + io_alloc.alloc = allocfn; + io_alloc.realloc = reallocfn; + io_alloc.free = freefn; +}
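
The hunks above introduce a pluggable allocator (struct io_alloc, io_set_alloc), an always-ready plan (io_always_, reused by io_never and by zero-length reads/writes), and small helpers such as io_is_idle, io_close_other and io_conn_fd. The stand-alone sketch below is not part of the diff; it is one way a caller might exercise the new entry points, assuming the matching declarations exist in ccan/io/io.h at this revision (io_set_alloc, io_new_conn_, io_write_, io_always_, io_close_, io_loop). The counting_* wrappers and the callback names are invented for illustration.

/* Sketch (see note above): hypothetical use of io_set_alloc() and io_always_(). */
#include <ccan/io/io.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static size_t live_allocs;

/* Simplistic wrappers that count ccan/io's internal allocations. */
static void *counting_alloc(size_t size)
{
	live_allocs++;
	return malloc(size);
}

static void *counting_realloc(void *ptr, size_t size)
{
	if (!ptr)
		live_allocs++;	/* realloc(NULL, ...) behaves like malloc */
	return realloc(ptr, size);
}

static void counting_free(void *ptr)
{
	if (ptr)
		live_allocs--;
	free(ptr);
}

/* Final step: close the connection. */
static struct io_plan finished(struct io_conn *conn, void *unused)
{
	return io_close_();
}

/* After the write completes, chain through the new io_always_(): the
 * callback runs without waiting for fd readiness (POLLALWAYS). */
static struct io_plan write_done(struct io_conn *conn, void *unused)
{
	return io_always_(finished, NULL);
}

int main(void)
{
	/* Install the wrappers before any listener or connection is created,
	 * so every io_alloc.alloc has a matching io_alloc.free. */
	io_set_alloc(counting_alloc, counting_realloc, counting_free);

	if (!io_new_conn_(STDOUT_FILENO,
			  io_write_("hello\n", 6, write_done, NULL)))
		return 1;

	io_loop();
	printf("outstanding ccan/io allocations: %zu\n", live_allocs);
	return 0;
}

Swapping the allocator after connections exist would pair io_alloc.free with memory obtained from the old allocator, so installing it before any io_new_listener_/io_new_conn_ call, as above, is the safer pattern.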