#include <errno.h>
#include <sys/types.h>
#include <ccan/read_write_all/read_write_all.h>
#include <ccan/failtest/failtest_proto.h>
#include <ccan/failtest/failtest.h>
#include <ccan/build_assert/build_assert.h>
/* Hook run before each failable call; returning false suppresses failure. */
bool (*failtest_hook)(struct failtest_call *history, unsigned num)
= failtest_default_hook;

/* fd used for --tracepath output; -1 when tracing is off. */
static int tracefd = -1;

/* How long the parent waits on the child's pipes (ms); set to -1 by
 * --tracepath (see failtest_init). */
unsigned int failtest_timeout_ms = 20000;

/* end is inclusive: you can't have a 0-byte lock. */

/* Optional user check run at exit; returning false fails the test. */
bool (*failtest_exit_check)(struct failtest_call *history, unsigned num);

/* Growable record of every intercepted call, in order. */
static struct failtest_call *history = NULL;
static unsigned int history_num = 0;

/* Control pipe to our parent when we are a failure-testing child;
 * -1 in the root process. */
static int control_fd = -1;

/* Wall-clock start time, used for trace timestamps. */
static struct timeval start;

/* Writes reported up from the failing child, checked against our own. */
static struct write_call *child_writes = NULL;
static unsigned int child_writes_num = 0;

/* Process currently holding the recorded locks (0 = nobody yet). */
static pid_t lock_owner;
static struct lock_info *locks = NULL;
static unsigned int lock_num = 0;

/* One letter per failtest_call_type, used to spell --failpath strings. */
static const char info_to_arg[] = "mceoprwf";

/* Dummy call used for failtest_undo wrappers. */
static struct failtest_call unrecorded_call;
/* Append one call record to the global history and return it; elem
 * (elem_size bytes) is the type-specific argument block, copied into
 * the union.  (Some lines of this definition are not visible here.) */
static struct failtest_call *add_history_(enum failtest_call_type type,
	/* NULL file is how we suppress failure. */
	return &unrecorded_call;
	/* NOTE(review): realloc result is unchecked — OOM would crash. */
	history = realloc(history, (history_num + 1) * sizeof(*history));
	history[history_num].type = type;
	history[history_num].file = file;
	history[history_num].line = line;
	history[history_num].cleanup = NULL;
	memcpy(&history[history_num].u, elem, elem_size);
	return &history[history_num++];
/* Convenience wrapper: derives elem_size from the element's own type. */
#define add_history(type, file, line, elem) \
	add_history_((type), (file), (line), (elem), sizeof(*(elem)))

/* Install a cleanup callback.  The (void)sizeof(clean((type *)NULL))
 * compile-time-checks that clean() accepts a (type *) without ever
 * evaluating the call. */
#define set_cleanup(call, clean, type) \
	(call)->cleanup = (void *)((void)sizeof(clean((type *)NULL)), (clean))
/* Default failtest_hook; body not visible in this view — presumably it
 * always allows the failure (TODO confirm against full source). */
bool failtest_default_hook(struct failtest_call *history, unsigned num)
/* Read one write report (fixed-size header, then the written bytes)
 * from the child over fd, appending it to child_writes[].  Returns
 * false if either read fails/EOFs. */
static bool read_write_info(int fd)
	/* We don't need all of this, but it's simple. */
	child_writes = realloc(child_writes,
		(child_writes_num+1) * sizeof(child_writes[0]));
	w = &child_writes[child_writes_num];
	if (!read_all(fd, w, sizeof(*w)))
	/* NOTE(review): malloc result unchecked before read_all uses it. */
	w->buf = buf = malloc(w->count);
	if (!read_all(fd, buf, w->count))
/* Build a malloc'd --failpath string: one info_to_arg letter per
 * history entry, uppercased for the calls that were failed.
 * Caller frees. */
static char *failpath_string(void)
	char *ret = malloc(history_num + 1);

	for (i = 0; i < history_num; i++) {
		ret[i] = info_to_arg[history[i].type];
		/* Uppercase marks "this call failed". */
			ret[i] = toupper(ret[i]);
/* Notify our parent (if we have one) of an event over the control pipe;
 * a no-op in the root process (control_fd == -1). */
static void tell_parent(enum info_type type)
	if (control_fd != -1)
		write_all(control_fd, &type, sizeof(type));
/* Report a test failure: print the formatted message, the child's
 * captured output, and a reproducer --failpath, then tell the parent
 * we FAILED.  (Exit path not visible in this view.) */
static void child_fail(const char *out, size_t outlen, const char *fmt, ...)
	char *path = failpath_string();

	vfprintf(stderr, fmt, ap);
	/* Replay the child's captured stdout/stderr. */
	fprintf(stderr, "%.*s", (int)outlen, out);
	printf("To reproduce: --failpath=%s\n", path);
	tell_parent(FAILURE);
/* Signal handler installed by should_fail(); body not visible here —
 * presumably forwards the signal to our child (TODO confirm). */
static void hand_down(int signal)
/* Drop all recorded fcntl locks: directly if this process owns them,
 * otherwise by asking the owning ancestor over the control pipe. */
static void release_locks(void)
	/* Locks were never acquired/reacquired? */
	/* We own them?  Release them all. */
	if (lock_owner == getpid()) {
		fl.l_whence = SEEK_SET;

		for (i = 0; i < lock_num; i++)
			fcntl(locks[i].fd, F_SETLK, &fl);
	/* Our parent must have them; pass request up. */
	enum info_type type = RELEASE_LOCKS;
	assert(control_fd != -1);
	write_all(control_fd, &type, sizeof(type));
/* off_t is a signed type.  Getting its max is non-trivial.
 *
 * Returns the largest representable off_t for the supported widths.
 * Fix: the previous constants (0x7FFFFFF and 0x7FFFFFFFFFFFFFF) were
 * each one hex digit short — 2^27-1 and 2^59-1 instead of the true
 * maxima 2^31-1 and 2^63-1 — so "whole file" lock ranges and the
 * end-of-file sentinel in get_locks() used the wrong value. */
static off_t off_max(void)
{
	BUILD_ASSERT(sizeof(off_t) == 4 || sizeof(off_t) == 8);
	if (sizeof(off_t) == 4)
		return (off_t)0x7FFFFFFF;		/* 2^31 - 1 */
	else
		return (off_t)0x7FFFFFFFFFFFFFFFULL;	/* 2^63 - 1 */
}
/* (Re)acquire every recorded lock in this process: first make the
 * current owner release them (message up the control pipe), then take
 * each one blocking with F_SETLKW. */
static void get_locks(void)
	/* Already ours: nothing to do. */
	if (lock_owner == getpid())

	/* Someone up the chain holds them: ask them to let go. */
	if (lock_owner != 0) {
		enum info_type type = RELEASE_LOCKS;
		assert(control_fd != -1);
		write_all(control_fd, &type, sizeof(type));

	fl.l_whence = SEEK_SET;

	for (i = 0; i < lock_num; i++) {
		fl.l_type = locks[i].type;
		fl.l_start = locks[i].start;
		/* end == off_max() is the "to end of file" sentinel
		 * (the l_len = 0 branch is not visible in this view). */
		if (locks[i].end == off_max())
			fl.l_len = locks[i].end - locks[i].start + 1;

		if (fcntl(locks[i].fd, F_SETLKW, &fl) != 0)
	lock_owner = getpid();
/* Write str to the trace fd; loops on non-positive write() results and
 * err()s out on hard failure (retry details not visible in this view). */
static void trace_str(const char *str)
	while ((ret = write(tracefd, str, strlen(str))) <= 0) {
		err(1, "Writing trace.");
	/* Next node in the singly-linked list built by save_files(). */
	struct saved_file *next;
/* Snapshot fd's entire contents plus its current offset into a new
 * saved_file node (prepended to next); undone by restore_files(). */
static struct saved_file *save_file(struct saved_file *next, int fd)
	struct saved_file *s = malloc(sizeof(*s));

	s->off = lseek(fd, 0, SEEK_CUR);
	/* Special file?  Erk... */
	assert(s->off != -1);
	s->len = lseek(fd, 0, SEEK_END);
	lseek(fd, 0, SEEK_SET);
	s->contents = malloc(s->len);
	/* NOTE(review): read() result ignored — a short read would leave
	 * part of the snapshot uninitialized. */
	read(fd, s->contents, s->len);
	/* Put the offset back where the program left it. */
	lseek(fd, s->off, SEEK_SET);
/* We have little choice but to save and restore open files: mmap means we
 * can really intercept changes in the child.
 *
 * We could do non-mmap'ed files on demand, however. */
static struct saved_file *save_files(void)
	struct saved_file *files = NULL;

	/* Figure out the set of live fds.  NOTE(review): starts at
	 * history_num - 2, presumably to skip the entry for the call
	 * currently being decided — confirm against full source. */
	for (i = history_num - 2; i >= 0; i--) {
		if (history[i].type == FAILTEST_OPEN) {
			int fd = history[i].u.open.ret;
			/* Only do successful, writable fds. */
			/* If it was closed, cleanup == NULL. */
			if (!history[i].cleanup)

			if ((history[i].u.open.flags & O_RDWR) == O_RDWR) {
				files = save_file(files, fd);
			} else if ((history[i].u.open.flags & O_WRONLY)
				/* FIXME: Handle O_WRONLY.  Open with O_RDWR? */
/* Put every snapshot taken by save_files() back: rewrite the contents,
 * truncate to the saved length, and restore the saved offset.
 * (List traversal/freeing lines not fully visible here.) */
static void restore_files(struct saved_file *s)
	struct saved_file *next = s->next;

	lseek(s->fd, 0, SEEK_SET);
	/* NOTE(review): write() result unchecked. */
	write(s->fd, s->contents, s->len);
	/* Trim anything the child appended past the saved length. */
	ftruncate(s->fd, s->len);
	lseek(s->fd, s->off, SEEK_SET);
/* The heart of failtest: decide whether the recorded call should fail.
 * Under --failpath it follows the given script; otherwise it forks — the
 * child simulates the failure (this returns true there) while the parent
 * captures the child's output and control-pipe messages, then continues
 * without failing.  (Many interior lines are missing from this view.) */
static bool should_fail(struct failtest_call *call)
	int control[2], output[2];
	enum info_type type = UNEXPECTED;
	struct saved_file *files;

	/* failtest_undo wrappers are never failed. */
	if (call == &unrecorded_call)

	/* + means continue after end, like normal. */
	if (*failpath == '+')
	/* Each failpath letter must match the call sequence exactly;
	 * uppercase means "fail this call". */
	if (tolower(*failpath) != info_to_arg[call->type])
		errx(1, "Failpath expected '%c' got '%c'\n",
			info_to_arg[call->type], *failpath);
	call->fail = isupper(*(failpath++));

	/* The user hook may veto failing this call. */
	if (!failtest_hook(history, history_num)) {

	/* Snapshot writable files so the parent can undo child changes. */
	files = save_files();

	/* We're going to fail in the child. */
	if (pipe(control) != 0 || pipe(output) != 0)
		err(1, "opening pipe");

	/* Prevent double-printing (in child and parent) */
		err(1, "forking failed");

	/* Tracing: stamp pid and elapsed time, then failpath + call site. */
		gettimeofday(&now, NULL);
		if (now.tv_usec < start.tv_usec) {
			now.tv_usec += 1000000;
		now.tv_usec -= start.tv_usec;
		now.tv_sec -= start.tv_sec;
		sprintf(str, "%u (%u.%02u): ", getpid(),
			(int)now.tv_sec, (int)now.tv_usec / 10000);
		p = failpath_string();
		p = strchr(history[history_num-1].file, '/');
		trace_str(history[history_num-1].file);
		sprintf(str, ":%u)\n", history[history_num-1].line);

	/* Child: route stdout/stderr into the output pipe for capture. */
		dup2(output[1], STDOUT_FILENO);
		dup2(output[1], STDERR_FILENO);
		if (output[1] != STDOUT_FILENO && output[1] != STDERR_FILENO)
		control_fd = control[1];
		/* Forward SIGUSR1 on down to our own children. */
		signal(SIGUSR1, hand_down);

	/* We grab output so we can display it; we grab writes so we

	struct pollfd pfd[2];

	pfd[0].fd = output[0];
	pfd[0].events = POLLIN|POLLHUP;
	pfd[1].fd = control[0];
	pfd[1].events = POLLIN|POLLHUP;

		/* After SUCCESS only the output pipe is watched. */
		ret = poll(pfd, 1, failtest_timeout_ms);
		ret = poll(pfd, 2, failtest_timeout_ms);

		if (pfd[0].revents & POLLIN) {
			/* Grow the capture buffer in 8K chunks. */
			out = realloc(out, outlen + 8192);
			len = read(output[0], out + outlen, 8192);
		} else if (type != SUCCESS && (pfd[1].revents & POLLIN)) {
			if (read_all(control[0], &type, sizeof(type))) {
				if (!read_write_info(control[0]))
			} else if (type == RELEASE_LOCKS) {
				/* FIXME: Tell them we're done... */
		} else if (pfd[0].revents & POLLHUP) {
	} while (type != FAILURE);

	waitpid(child, &status, 0);
	if (!WIFEXITED(status))
		child_fail(out, outlen, "Killed by signal %u: ",
	/* Child printed failure already, just pass up exit code. */
	if (type == FAILURE) {
		fprintf(stderr, "%.*s", (int)outlen, out);
		exit(WEXITSTATUS(status) ? WEXITSTATUS(status) : 1);
	if (WEXITSTATUS(status) != 0)
		child_fail(out, outlen, "Exited with status %i: ",
			WEXITSTATUS(status));

	signal(SIGUSR1, SIG_DFL);
	restore_files(files);

	/* We continue onwards without failing. */
/* Exit-time cleanup for a recorded calloc; body not visible here —
 * presumably frees call->ret (TODO confirm). */
static void cleanup_calloc(struct calloc_call *call)
/* calloc() wrapper: records the call; in a failure child it returns
 * NULL instead of allocating.  Successful allocations get a cleanup so
 * failtest_exit() can free them. */
void *failtest_calloc(size_t nmemb, size_t size,
	const char *file, unsigned line)
	struct failtest_call *p;
	struct calloc_call call;

	p = add_history(FAILTEST_CALLOC, file, line, &call);

	if (should_fail(p)) {
		p->u.calloc.ret = NULL;
		p->u.calloc.ret = calloc(nmemb, size);
		set_cleanup(p, cleanup_calloc, struct calloc_call);

	return p->u.calloc.ret;
/* Exit-time cleanup for a recorded malloc; body not visible here —
 * presumably frees call->ret (TODO confirm). */
static void cleanup_malloc(struct malloc_call *call)
487 void *failtest_malloc(size_t size, const char *file, unsigned line)
489 struct failtest_call *p;
490 struct malloc_call call;
493 p = add_history(FAILTEST_MALLOC, file, line, &call);
494 if (should_fail(p)) {
495 p->u.calloc.ret = NULL;
498 p->u.calloc.ret = malloc(size);
499 set_cleanup(p, cleanup_malloc, struct malloc_call);
502 return p->u.calloc.ret;
/* Exit-time cleanup for a recorded realloc; body not visible here —
 * presumably frees call->ret (TODO confirm). */
static void cleanup_realloc(struct realloc_call *call)
/* Walk back and find out if we got this ptr from a previous routine. */
/* If so, clear that entry's cleanup so exit-time cleanup won't
 * double-free a pointer that free()/realloc() already consumed. */
static void fixup_ptr_history(void *ptr, unsigned int last)
	/* Start at end of history, work back. */
	for (i = last - 1; i >= 0; i--) {
		switch (history[i].type) {
		case FAILTEST_REALLOC:
			if (history[i].u.realloc.ret == ptr) {
				history[i].cleanup = NULL;
		case FAILTEST_MALLOC:
			if (history[i].u.malloc.ret == ptr) {
				history[i].cleanup = NULL;
		case FAILTEST_CALLOC:
			if (history[i].u.calloc.ret == ptr) {
				history[i].cleanup = NULL;
/* realloc() wrapper: records the call; in a failure child it returns
 * NULL (original pointer untouched).  On the success path the old
 * pointer's history entry is detached first, since realloc may free it. */
void *failtest_realloc(void *ptr, size_t size, const char *file, unsigned line)
	struct failtest_call *p;
	struct realloc_call call;

	p = add_history(FAILTEST_REALLOC, file, line, &call);

	/* FIXME: Try one child moving allocation, one not. */
	if (should_fail(p)) {
		p->u.realloc.ret = NULL;
		/* Don't double-free ptr at exit: realloc owns it now. */
		fixup_ptr_history(ptr, history_num-1);
		p->u.realloc.ret = realloc(ptr, size);
		set_cleanup(p, cleanup_realloc, struct realloc_call);

	return p->u.realloc.ret;
/* free() wrapper: detach ptr from history so exit-time cleanup won't
 * free it again (the actual free() call is not visible in this view). */
void failtest_free(void *ptr)
	fixup_ptr_history(ptr, history_num);
/* Exit-time cleanup for a recorded open; body not visible here —
 * presumably closes call->ret (TODO confirm). */
static void cleanup_open(struct open_call *call)
/* open() wrapper: records pathname/flags/mode (varargs after line); in
 * a failure child the open is skipped.  The pathname is strdup'd so the
 * history entry outlives the caller's buffer. */
int failtest_open(const char *pathname,
	const char *file, unsigned line, ...)
	struct failtest_call *p;
	struct open_call call;

	call.pathname = strdup(pathname);
	call.flags = va_arg(ap, int);
	/* O_CREAT means a mode argument follows the flags. */
	if (call.flags & O_CREAT) {
		call.mode = va_arg(ap, mode_t);

	p = add_history(FAILTEST_OPEN, file, line, &call);
	/* Avoid memory leak! */
	if (p == &unrecorded_call)
		free((char *)call.pathname);
	if (should_fail(p)) {
		/* FIXME: Play with error codes? */
		p->u.open.ret = open(pathname, call.flags, call.mode);
		set_cleanup(p, cleanup_open, struct open_call);

	return p->u.open.ret;
/* Exit-time cleanup for a recorded pipe: close whichever ends the
 * program did not already close itself. */
static void cleanup_pipe(struct pipe_call *call)
	if (!call->closed[0])
	if (!call->closed[1])
/* pipe() wrapper: records the call and the resulting fds; in a failure
 * child it returns -1 without creating a pipe. */
int failtest_pipe(int pipefd[2], const char *file, unsigned line)
	struct failtest_call *p;
	struct pipe_call call;

	p = add_history(FAILTEST_PIPE, file, line, &call);
	if (should_fail(p)) {
		/* FIXME: Play with error codes? */
	p->u.pipe.ret = pipe(p->u.pipe.fds);
	p->u.pipe.closed[0] = p->u.pipe.closed[1] = false;
	set_cleanup(p, cleanup_pipe, struct pipe_call);

	/* This causes valgrind to notice if they use pipefd[] after failure */
	memcpy(pipefd, p->u.pipe.fds, sizeof(p->u.pipe.fds));

	return p->u.pipe.ret;
/* pread() wrapper: records the call; in a failure child it returns an
 * error instead of reading (error-setting lines not visible here). */
ssize_t failtest_pread(int fd, void *buf, size_t count, off_t off,
	const char *file, unsigned line)
	struct failtest_call *p;
	struct read_call call;

	p = add_history(FAILTEST_READ, file, line, &call);

	/* FIXME: Try partial read returns. */
	if (should_fail(p)) {
		p->u.read.ret = pread(fd, buf, count, off);

	return p->u.read.ret;
/* pwrite() wrapper: records the call.  A child reports each write up
 * the control pipe; the parent later checks that it performs the very
 * same writes (fd, offset, length, bytes) the child already did, so the
 * file isn't written twice with different data. */
ssize_t failtest_pwrite(int fd, const void *buf, size_t count, off_t off,
	const char *file, unsigned line)
	struct failtest_call *p;
	struct write_call call;

	p = add_history(FAILTEST_WRITE, file, line, &call);

	/* If we're a child, tell parent about write. */
	if (control_fd != -1) {
		enum info_type type = WRITE;

		write_all(control_fd, &type, sizeof(type));
		write_all(control_fd, &p->u.write, sizeof(p->u.write));
		write_all(control_fd, buf, count);

	/* FIXME: Try partial write returns. */
	if (should_fail(p)) {

	/* FIXME: We assume same write order in parent and child */
	if (child_writes_num != 0) {
		/* Cross-check our write against the one the child reported. */
		if (child_writes[0].fd != fd)
			errx(1, "Child wrote to fd %u, not %u?",
				child_writes[0].fd, fd);
		if (child_writes[0].off != p->u.write.off)
			errx(1, "Child wrote to offset %zu, not %zu?",
				(size_t)child_writes[0].off,
				(size_t)p->u.write.off);
		if (child_writes[0].count != count)
			errx(1, "Child wrote length %zu, not %zu?",
				child_writes[0].count, count);
		if (memcmp(child_writes[0].buf, buf, count)) {
				"Child wrote differently to"
				" fd %u than we did!\n", fd);
		/* Consume the matched entry from the queue. */
		free((char *)child_writes[0].buf);
		memmove(&child_writes[0], &child_writes[1],
			sizeof(child_writes[0]) * child_writes_num);

		/* If this is a socket or pipe, child wrote it
		if (p->u.write.off == (off_t)-1) {
			p->u.write.ret = count;
			return p->u.write.ret;

	p->u.write.ret = pwrite(fd, buf, count, off);

	return p->u.write.ret;
/* read() wrapper: delegates to failtest_pread() at the fd's current
 * offset (call continuation not visible in this view). */
ssize_t failtest_read(int fd, void *buf, size_t count,
	const char *file, unsigned line)
	return failtest_pread(fd, buf, count, lseek(fd, 0, SEEK_CUR),
/* write() wrapper: delegates to failtest_pwrite() at the fd's current
 * offset (call continuation not visible in this view). */
ssize_t failtest_write(int fd, const void *buf, size_t count,
	const char *file, unsigned line)
	return failtest_pwrite(fd, buf, count, lseek(fd, 0, SEEK_CUR),
/* Record a lock (or, with F_UNLCK, an unlock) of the inclusive byte
 * range [start, end] on fd, splitting/trimming/removing any overlapping
 * existing entries.  Returns the (possibly realloc'd) locks array —
 * hence WARN_UNUSED_RESULT. */
static struct lock_info *WARN_UNUSED_RESULT
add_lock(struct lock_info *locks, int fd, off_t start, off_t end, int type)
	for (i = 0; i < lock_num; i++) {
		/* Four cases we care about:
		if (start > l->start && end < l->end) {
			/* Mid overlap: trim entry, add new one. */
			off_t new_start, new_end;

			/* Recursive call re-adds the tail piece. */
			locks = add_lock(locks,
				fd, new_start, new_end, l->type);
		} else if (start <= l->start && end >= l->end) {
			/* Total overlap: eliminate entry. */
		} else if (end >= l->start && end < l->end) {
			/* Start overlap: trim entry. */
		} else if (start > l->start && start <= l->end) {
			/* End overlap: trim entry. */
		/* Nothing left?  Remove it. */
		if (l->end < l->start) {
			memmove(l, l + 1, (--lock_num - i) * sizeof(l[0]));

	/* Unlocks only erase; anything else appends a fresh entry. */
	if (type != F_UNLCK) {
		locks = realloc(locks, (lock_num + 1) * sizeof(*locks));
		l = &locks[lock_num++];
/* We trap this so we can record it: we don't fail it. */
/* close() wrapper: marks the matching pipe end or open entry as closed
 * (clearing its cleanup once fully closed), and drops any locks held on
 * fd — POSIX releases a process's record locks on close. */
int failtest_close(int fd)
	/* Trace history to find source of fd. */
	for (i = history_num-1; i >= 0; i--) {
		switch (history[i].type) {
			if (history[i].u.pipe.fds[0] == fd) {
				assert(!history[i].u.pipe.closed[0]);
				history[i].u.pipe.closed[0] = true;
				/* Both ends closed: no cleanup needed. */
				if (history[i].u.pipe.closed[1])
					history[i].cleanup = NULL;
			if (history[i].u.pipe.fds[1] == fd) {
				assert(!history[i].u.pipe.closed[1]);
				history[i].u.pipe.closed[1] = true;
				if (history[i].u.pipe.closed[0])
					history[i].cleanup = NULL;
			if (history[i].u.open.ret == fd) {
				assert((void *)history[i].cleanup
					== (void *)cleanup_open);
				history[i].cleanup = NULL;

	/* Closing an fd releases all our locks on it. */
	locks = add_lock(locks, fd, 0, off_max(), F_UNLCK);
/* Zero length means "to end of file" */
/* Convert an fcntl (start, len) pair into an inclusive end offset.
 * NOTE(review): the len == 0 branch is not visible in this view —
 * presumably it returns off_max(). */
static off_t end_of(off_t start, off_t len)
	return start + len - 1;
/* FIXME: This only handles locks, really. */
/* fcntl() wrapper: non-lock commands are extracted and passed straight
 * through; F_SETLK/F_SETLKW are recorded, may be failed in a child, and
 * on success update the global lock table via add_lock(). */
int failtest_fcntl(int fd, const char *file, unsigned line, int cmd, ...)
	struct failtest_call *p;
	struct fcntl_call call;

	/* Argument extraction. */
	call.arg.l = va_arg(ap, long);
	/* Pass-through: not a lock command we track. */
	return fcntl(fd, cmd, call.arg.l);
	return fcntl(fd, cmd);
	call.arg.fl = *va_arg(ap, struct flock *);
	return fcntl(fd, cmd, &call.arg.fl);
	call.arg.fl = *va_arg(ap, struct flock *);
	/* This means you need to implement it here. */
	err(1, "failtest: unknown fcntl %u", cmd);

	p = add_history(FAILTEST_FCNTL, file, line, &call);

	if (should_fail(p)) {
		if (p->u.fcntl.cmd == F_SETLK)
		p->u.fcntl.ret = fcntl(p->u.fcntl.fd, p->u.fcntl.cmd,
		if (p->u.fcntl.ret == -1)
		/* We don't handle anything else yet. */
		assert(p->u.fcntl.arg.fl.l_whence == SEEK_SET);
		/* Mirror the successful lock/unlock in our table. */
		locks = add_lock(locks,
			p->u.fcntl.arg.fl.l_start,
			end_of(p->u.fcntl.arg.fl.l_start,
				p->u.fcntl.arg.fl.l_len),
			p->u.fcntl.arg.fl.l_type);

	return p->u.fcntl.ret;
/* Parse failtest's own command-line options (--failpath=<letters>,
 * --tracepath) and record the start time for trace timestamps. */
void failtest_init(int argc, char *argv[])
	for (i = 1; i < argc; i++) {
		if (!strncmp(argv[i], "--failpath=", strlen("--failpath="))) {
			failpath = argv[i] + strlen("--failpath=");
		} else if (strcmp(argv[i], "--tracepath") == 0) {
			tracefd = dup(STDERR_FILENO);
			/* NOTE(review): failtest_timeout_ms is unsigned, so
			 * -1 wraps to UINT_MAX; passed to poll()'s int
			 * timeout this reads back as -1 = wait forever. */
			failtest_timeout_ms = -1;
	gettimeofday(&start, NULL);
/* Free up memory, so valgrind doesn't report leaks. */
static void free_everything(void)
	/* We don't do this in cleanup: needed even for failed opens. */
	for (i = 0; i < history_num; i++) {
		if (history[i].type == FAILTEST_OPEN)
			free((char *)history[i].u.open.pathname);
/* Test process exit point: run the optional user exit check, undo every
 * recorded call's side effects (in reverse order), and report SUCCESS
 * to the parent if we are a child. */
void failtest_exit(int status)
	/* Root process (no parent to report to). */
	if (control_fd == -1) {

	if (failtest_exit_check) {
		if (!failtest_exit_check(history, history_num))
			child_fail(NULL, 0, "failtest_exit_check failed\n");

	/* Cleanup everything, in reverse order. */
	for (i = history_num - 1; i >= 0; i--)
		if (history[i].cleanup)
			history[i].cleanup(&history[i].u);

	tell_parent(SUCCESS);