/* Licensed under LGPLv2.1+ - see LICENSE file for details */
#include <assert.h>
#include <poll.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <unistd.h>
15 struct io_listener *io_new_listener_(int fd,
16 void (*init)(int fd, void *arg),
19 struct io_listener *l = malloc(sizeof(*l));
24 l->fd.listener = true;
28 if (!add_listener(l)) {
35 void io_close_listener(struct io_listener *l)
42 struct io_conn *io_new_conn_(int fd,
43 struct io_plan (*start)(struct io_conn *, void *),
44 void (*finish)(struct io_conn *, void *),
47 struct io_conn *conn = malloc(sizeof(*conn));
52 conn->fd.listener = false;
54 conn->plan.next = start;
55 conn->finish = finish;
56 conn->finish_arg = conn->plan.next_arg = arg;
57 conn->plan.pollflag = 0;
58 conn->plan.state = IO_NEXT;
61 if (!add_conn(conn)) {
68 struct io_conn *io_duplex_(struct io_conn *old,
69 struct io_plan (*start)(struct io_conn *, void *),
70 void (*finish)(struct io_conn *, void *),
77 conn = malloc(sizeof(*conn));
81 conn->fd.listener = false;
82 conn->fd.fd = old->fd.fd;
83 conn->plan.next = start;
84 conn->finish = finish;
85 conn->finish_arg = conn->plan.next_arg = arg;
86 conn->plan.pollflag = 0;
87 conn->plan.state = IO_NEXT;
90 if (!add_duplex(conn)) {
98 bool io_timeout_(struct io_conn *conn, struct timespec ts,
99 struct io_plan (*cb)(struct io_conn *, void *), void *arg)
101 if (!conn->timeout) {
102 conn->timeout = malloc(sizeof(*conn->timeout));
106 assert(!timeout_active(conn));
108 conn->timeout->next = cb;
109 conn->timeout->next_arg = arg;
110 backend_add_timeout(conn, ts);
114 static enum io_result do_write(struct io_conn *conn)
116 ssize_t ret = write(conn->fd.fd, conn->plan.u.write.buf, conn->plan.u.write.len);
120 conn->plan.u.write.buf += ret;
121 conn->plan.u.write.len -= ret;
122 if (conn->plan.u.write.len == 0)
123 return RESULT_FINISHED;
128 /* Queue some data to be written. */
129 struct io_plan io_write_(const void *data, size_t len,
130 struct io_plan (*cb)(struct io_conn *, void *),
135 plan.u.write.buf = data;
136 plan.u.write.len = len;
140 plan.pollflag = POLLOUT;
145 static enum io_result do_read(struct io_conn *conn)
147 ssize_t ret = read(conn->fd.fd, conn->plan.u.read.buf,
148 conn->plan.u.read.len);
151 conn->plan.u.read.buf += ret;
152 conn->plan.u.read.len -= ret;
153 if (conn->plan.u.read.len == 0)
154 return RESULT_FINISHED;
159 /* Queue a request to read into a buffer. */
160 struct io_plan io_read_(void *data, size_t len,
161 struct io_plan (*cb)(struct io_conn *, void *),
166 plan.u.read.buf = data;
167 plan.u.read.len = len;
171 plan.pollflag = POLLIN;
176 static enum io_result do_read_partial(struct io_conn *conn)
178 ssize_t ret = read(conn->fd.fd, conn->plan.u.readpart.buf,
179 *conn->plan.u.readpart.lenp);
182 *conn->plan.u.readpart.lenp = ret;
183 return RESULT_FINISHED;
186 /* Queue a partial request to read into a buffer. */
187 struct io_plan io_read_partial_(void *data, size_t *len,
188 struct io_plan (*cb)(struct io_conn *, void *),
193 plan.u.readpart.buf = data;
194 plan.u.readpart.lenp = len;
195 plan.io = do_read_partial;
198 plan.pollflag = POLLIN;
204 static enum io_result do_write_partial(struct io_conn *conn)
206 ssize_t ret = write(conn->fd.fd, conn->plan.u.writepart.buf,
207 *conn->plan.u.writepart.lenp);
210 *conn->plan.u.writepart.lenp = ret;
211 return RESULT_FINISHED;
214 /* Queue a partial write request. */
215 struct io_plan io_write_partial_(const void *data, size_t *len,
216 struct io_plan (*cb)(struct io_conn*, void *),
221 plan.u.writepart.buf = data;
222 plan.u.writepart.lenp = len;
223 plan.io = do_write_partial;
226 plan.pollflag = POLLOUT;
232 struct io_plan io_idle(void)
237 plan.state = IO_IDLE;
242 void io_wake_(struct io_conn *conn,
243 struct io_plan (*fn)(struct io_conn *, void *), void *arg)
246 /* It might have finished, but we haven't called its finish() yet. */
247 if (conn->plan.state == IO_FINISHED)
249 assert(conn->plan.state == IO_IDLE);
250 conn->plan.next = fn;
251 conn->plan.next_arg = arg;
252 conn->plan.pollflag = 0;
253 conn->plan.state = IO_NEXT;
254 backend_wakeup(conn);
257 static struct io_plan do_next(struct io_conn *conn)
259 if (timeout_active(conn))
260 backend_del_timeout(conn);
261 return conn->plan.next(conn, conn->plan.next_arg);
264 struct io_plan do_ready(struct io_conn *conn)
266 assert(conn->plan.state == IO_IO);
267 switch (conn->plan.io(conn)) {
269 return io_close(conn, NULL);
270 case RESULT_FINISHED:
271 return do_next(conn);
279 /* Useful next functions. */
280 /* Close the connection, we're done. */
281 struct io_plan io_close(struct io_conn *conn, void *arg)
285 plan.state = IO_FINISHED;
291 /* Exit the loop, returning this (non-NULL) arg. */
292 struct io_plan io_break_(void *ret,
293 struct io_plan (*fn)(struct io_conn *, void *),
298 io_loop_return = ret;
300 plan.state = IO_NEXT;