1 /* Licensed under LGPLv2.1+ - see LICENSE file for details */
5 #include <sys/socket.h>
/* Create a listener wrapping an already-bound fd.
 * start() is invoked for each accepted connection; finish() when a
 * connection ends.  (Several original lines are elided from this view:
 * the full parameter list, field initialization, and the cleanup body
 * of the add_listener() failure branch are not visible.) */
15 struct io_listener *io_new_listener_(int fd,
16 struct io_plan (*start)(struct io_conn *,
18 void (*finish)(struct io_conn *, void *),
/* NOTE(review): malloc result is not checked on any visible line —
 * confirm the elided lines handle l == NULL before the writes below. */
21 struct io_listener *l = malloc(sizeof(*l));
/* Distinguishes listeners from ordinary connections in the shared fd table. */
26 l->fd.listener = true;
/* Register with the backend; failure branch body is elided here. */
31 if (!add_listener(l)) {
/* Tear down a listener created by io_new_listener_().
 * (Body elided from this view — presumably deregisters from the backend
 * and frees l; confirm against the full source.) */
38 void io_close_listener(struct io_listener *l)
/* Create a new connection around fd.  The first plan executed is
 * start(conn, arg); finish(conn, arg) runs when the connection closes.
 * (Some lines elided: remaining parameters, fd assignment, and the
 * add_conn() failure cleanup are not visible in this view.) */
45 struct io_conn *io_new_conn_(int fd,
46 struct io_plan (*start)(struct io_conn *, void *),
47 void (*finish)(struct io_conn *, void *),
/* NOTE(review): no malloc-NULL check visible before the field writes
 * below — confirm the elided lines guard against allocation failure. */
50 struct io_conn *conn = malloc(sizeof(*conn));
/* Ordinary connection, not a listener. */
55 conn->fd.listener = false;
/* First callback to run, and the shared user argument for both the
 * next-plan callback and the finish callback. */
57 conn->plan.next = start;
58 conn->finish = finish;
59 conn->finish_arg = conn->plan.next_arg = arg;
/* No poll events needed yet; IO_NEXT means "invoke plan.next" on the
 * next pass rather than waiting for fd readiness. */
60 conn->plan.pollflag = 0;
61 conn->plan.state = IO_NEXT;
/* Hand to the backend; failure branch body is elided here. */
64 if (!add_conn(conn)) {
/* Create a second io_conn sharing the same fd as an existing one, so
 * reads and writes can proceed as two independent plan chains.
 * (Elided from this view: remaining parameters, conn's declaration,
 * other field setup, and the add_duplex() failure cleanup.) */
71 struct io_conn *io_duplex_(struct io_conn *old,
72 struct io_plan (*start)(struct io_conn *, void *),
73 void (*finish)(struct io_conn *, void *),
/* NOTE(review): allocation result not checked on any visible line. */
80 conn = malloc(sizeof(*conn));
/* Not a listener; shares old's file descriptor rather than owning a new one. */
84 conn->fd.listener = false;
85 conn->fd.fd = old->fd.fd;
/* Same initialization pattern as io_new_conn_: first plan, finish hook,
 * shared user argument. */
86 conn->plan.next = start;
87 conn->finish = finish;
88 conn->finish_arg = conn->plan.next_arg = arg;
/* Start in IO_NEXT with no poll interest, like a fresh connection. */
89 conn->plan.pollflag = 0;
90 conn->plan.state = IO_NEXT;
/* Pair with the existing conn in the backend; failure body elided. */
93 if (!add_duplex(conn)) {
/* Arm (or re-arm) a timeout on conn: after ts elapses, cb(conn, arg)
 * becomes the connection's next plan.  Returns bool — presumably false
 * on allocation failure; the return statements are elided from this view. */
101 bool io_timeout_(struct io_conn *conn, struct timespec ts,
102 struct io_plan (*cb)(struct io_conn *, void *), void *arg)
/* Timeout storage is allocated lazily on first use and kept for reuse. */
104 if (!conn->timeout) {
/* NOTE(review): no NULL check visible after this malloc — confirm the
 * elided lines bail out before backend_add_timeout(). */
105 conn->timeout = malloc(sizeof(*conn->timeout));
/* A previously-armed timeout must have been cleared before re-arming. */
109 assert(!timeout_active(conn));
/* Record what to run when the timer fires, then hand to the backend. */
111 conn->timeout->next = cb;
112 conn->timeout->next_arg = arg;
113 backend_add_timeout(conn, ts);
/* Plan executor for io_write_: push the remaining buffer to the fd.
 * (The error path for ret < 0 / ret == 0 is elided from this view;
 * presumably it returns a closing/again result — confirm in full source.) */
117 static enum io_result do_write(struct io_conn *conn)
119 ssize_t ret = write(conn->fd.fd, conn->plan.u.write.buf, conn->plan.u.write.len);
/* Partial writes advance the cursor; the plan stays active until the
 * whole buffer has been consumed. */
123 conn->plan.u.write.buf += ret;
124 conn->plan.u.write.len -= ret;
125 if (conn->plan.u.write.len == 0)
126 return RESULT_FINISHED;
/* Queue some data to be written.  Builds an IO_IO plan that writes all
 * `len` bytes of `data` (via do_write) before calling cb(conn, arg).
 * (Elided: the cb/arg parameters' tail, plan declaration, io/next/state
 * assignments, and the return — not visible in this view.) */
132 struct io_write_(const void *data, size_t len,
133 struct io_plan (*cb)(struct io_conn *, void *),
/* Record the caller's buffer; it must stay valid until the plan finishes. */
138 plan.u.write.buf = data;
139 plan.u.write.len = len;
/* Wait for the fd to become writable before attempting the write. */
143 plan.pollflag = POLLOUT;
/* Plan executor for io_read_: fill the remaining buffer from the fd.
 * (The ret <= 0 error/EOF path is elided from this view.) */
148 static enum io_result do_read(struct io_conn *conn)
150 ssize_t ret = read(conn->fd.fd, conn->plan.u.read.buf,
151 conn->plan.u.read.len);
/* Short reads advance the cursor; finished only once len reaches zero. */
154 conn->plan.u.read.buf += ret;
155 conn->plan.u.read.len -= ret;
156 if (conn->plan.u.read.len == 0)
157 return RESULT_FINISHED;
/* Queue a request to read into a buffer.  Builds an IO_IO plan that
 * reads exactly `len` bytes into `data` (via do_read) before invoking
 * the callback.  (Elided: parameter tail, plan declaration, io/next/
 * state assignments, and the return.) */
163 struct io_plan io_read_(void *data, size_t len,
164 struct io_plan (*cb)(struct io_conn *, void *),
/* Caller's buffer must remain valid for the lifetime of the plan. */
169 plan.u.read.buf = data;
170 plan.u.read.len = len;
/* Wait for readability before attempting the read. */
174 plan.pollflag = POLLIN;
/* Plan executor for io_read_partial_: a single read() of up to *lenp
 * bytes; any positive amount completes the plan, with *lenp updated to
 * the number of bytes actually read.  (ret <= 0 handling is elided.) */
179 static enum io_result do_read_partial(struct io_conn *conn)
181 ssize_t ret = read(conn->fd.fd, conn->plan.u.readpart.buf,
182 *conn->plan.u.readpart.lenp);
/* Report actual byte count back through the caller's size_t. */
185 *conn->plan.u.readpart.lenp = ret;
186 return RESULT_FINISHED;
/* Queue a partial request to read into a buffer.  Unlike io_read_,
 * completes after the first successful read of any size; *len is both
 * the maximum on entry and the actual count on completion.
 * (Elided: parameter tail, plan declaration, next/state assignments,
 * and the return.) */
190 struct io_plan io_read_partial_(void *data, size_t *len,
191 struct io_plan (*cb)(struct io_conn *, void *),
/* Both the buffer and the len pointer must outlive the plan. */
196 plan.u.readpart.buf = data;
197 plan.u.readpart.lenp = len;
198 plan.io = do_read_partial;
/* Wait for readability before the single read attempt. */
201 plan.pollflag = POLLIN;
/* Plan executor for io_write_partial_: a single write() of up to *lenp
 * bytes; any positive amount completes the plan, with *lenp updated to
 * the count actually written.  (ret < 0 handling is elided.) */
207 static enum io_result do_write_partial(struct io_conn *conn)
209 ssize_t ret = write(conn->fd.fd, conn->plan.u.writepart.buf,
210 *conn->plan.u.writepart.lenp);
/* Report actual byte count back through the caller's size_t. */
213 *conn->plan.u.writepart.lenp = ret;
214 return RESULT_FINISHED;
/* Queue a partial write request.  Completes after the first successful
 * write of any size; *len is the maximum on entry and the actual count
 * on completion.  (Elided: parameter tail, plan declaration, next/state
 * assignments, and the return.) */
218 struct io_plan io_write_partial_(const void *data, size_t *len,
219 struct io_plan (*cb)(struct io_conn*, void *),
/* Both the buffer and the len pointer must outlive the plan. */
224 plan.u.writepart.buf = data;
225 plan.u.writepart.lenp = len;
226 plan.io = do_write_partial;
/* Wait for writability before the single write attempt. */
229 plan.pollflag = POLLOUT;
/* Return a plan that parks the connection: no fd interest, nothing runs
 * until io_wake_() transitions it out of IO_IDLE.  (Plan declaration,
 * other field initialization, and the return are elided here.) */
235 struct io_plan io_idle(void)
240 plan.state = IO_IDLE;
/* Wake an idle connection: install fn(conn, arg) as its next plan and
 * notify the backend.  Safe to call on a connection that already
 * finished (that case is handled first; its branch body is elided). */
245 void io_wake_(struct io_conn *conn,
246 struct io_plan (*fn)(struct io_conn *, void *), void *arg)
/* It might have finished, but we haven't called its finish() yet. */
250 if (conn->plan.state == IO_FINISHED)
/* Only idle connections may be woken; anything else is a caller bug. */
252 assert(conn->plan.state == IO_IDLE);
/* Same transition io_new_conn_ makes: queue fn as the next callback,
 * with no fd readiness required. */
253 conn->plan.next = fn;
254 conn->plan.next_arg = arg;
255 conn->plan.pollflag = 0;
256 conn->plan.state = IO_NEXT;
/* Tell the backend so the event loop picks this connection up. */
257 backend_wakeup(conn);
/* Advance the connection: cancel any pending timeout (the plan it would
 * have interrupted is completing normally), then invoke the queued next
 * callback and return the plan it produces. */
260 static struct io_plan do_next(struct io_conn *conn)
262 if (timeout_active(conn))
263 backend_del_timeout(conn);
264 return conn->plan.next(conn, conn->plan.next_arg);
/* Called by the backend when the fd is ready: run the plan's io handler
 * (do_read, do_write, ...) and act on its result.  (Other switch cases
 * — e.g. "again, keep polling" — and the first case label are elided
 * from this view.) */
267 struct io_plan do_ready(struct io_conn *conn)
/* Only fd-driven plans should reach here. */
269 assert(conn->plan.state == IO_IO);
270 switch (conn->plan.io(conn)) {
/* Elided case label above: on this result, shut the connection down. */
272 return io_close(conn, NULL);
273 case RESULT_FINISHED:
/* Plan completed — chain into the queued next callback. */
274 return do_next(conn);
/* Useful next functions. */
/* Close the connection, we're done. */
/* Terminal plan: marks the connection IO_FINISHED so the loop runs its
 * finish() hook and reclaims it.  `arg` is unused on the visible lines;
 * the signature matches the plan-callback type so it can be used as a
 * next function directly.  (Plan declaration and return are elided.) */
284 struct io_plan io_close(struct io_conn *conn, void *arg)
288 plan.state = IO_FINISHED;
/* Exit the loop, returning this (non-NULL) arg. */
/* Stores ret in the global io_loop_return (which the event loop hands
 * back to its caller) and queues fn as the connection's next plan.
 * (Parameter tail, plan setup, and the return are elided; this block
 * also runs past the end of the visible chunk.) */
295 struct io_plan io_break_(void *ret,
296 struct io_plan (*fn)(struct io_conn *, void *),
/* Non-NULL by contract (see comment above); the loop returns this value. */
301 io_loop_return = ret;
/* Resume with fn on the next pass rather than waiting on the fd. */
303 plan.state = IO_NEXT;