1 /* Licensed under LGPL - see LICENSE file for details */
2 #include <ccan/failtest/failtest.h>
11 #include <sys/types.h>
16 #include <sys/resource.h>
19 #include <ccan/err/err.h>
20 #include <ccan/time/time.h>
21 #include <ccan/read_write_all/read_write_all.h>
22 #include <ccan/failtest/failtest_proto.h>
23 #include <ccan/build_assert/build_assert.h>
24 #include <ccan/hash/hash.h>
25 #include <ccan/htable/htable_type.h>
26 #include <ccan/str/str.h>
27 #include <ccan/compiler/compiler.h>
29 enum failtest_result (*failtest_hook)(struct tlist_calls *);
31 static FILE *tracef = NULL, *warnf;
32 static int traceindent = 0;
34 unsigned int failtest_timeout_ms = 20000;
37 const char *debugpath;
49 /* end is inclusive: you can't have a 0-byte lock. */
54 /* We hash the call location together with its backtrace. */
55 static size_t hash_call(const struct failtest_call *call)
57 return hash(call->file, strlen(call->file),
59 hash(call->backtrace, call->backtrace_num,
63 static bool call_eq(const struct failtest_call *call1,
64 const struct failtest_call *call2)
68 if (strcmp(call1->file, call2->file) != 0
69 || call1->line != call2->line
70 || call1->type != call2->type
71 || call1->backtrace_num != call2->backtrace_num)
74 for (i = 0; i < call1->backtrace_num; i++)
75 if (call1->backtrace[i] != call2->backtrace[i])
81 /* Defines struct failtable. */
82 HTABLE_DEFINE_TYPE(struct failtest_call, (struct failtest_call *), hash_call,
85 bool (*failtest_exit_check)(struct tlist_calls *history);
87 /* The entire history of all calls. */
88 static struct tlist_calls history = TLIST_INIT(history);
89 /* If we're a child, the fd two write control info to the parent. */
90 static int control_fd = -1;
91 /* If we're a child, this is the first call we did ourselves. */
92 static struct failtest_call *our_history_start = NULL;
93 /* For printing runtime with --trace. */
94 static struct timeabs start;
95 /* Set when failtest_hook returns FAIL_PROBE */
96 static bool probing = false;
97 /* Table to track duplicates. */
98 static struct failtable failtable;
100 /* Array of writes which our child did. We report them on failure. */
101 static struct write_call *child_writes = NULL;
102 static unsigned int child_writes_num = 0;
104 /* fcntl locking info. */
105 static pid_t lock_owner;
106 static struct lock_info *locks = NULL;
107 static unsigned int lock_num = 0;
109 /* Our original pid, which we return to anyone who asks. */
110 static pid_t orig_pid;
112 /* Mapping from failtest_type to char. */
113 static const char info_to_arg[] = "mceoxprwfal";
115 /* Dummy call used for failtest_undo wrappers. */
116 static struct failtest_call unrecorded_call;
118 struct contents_saved {
125 /* File contents, saved in this child only. */
126 struct saved_mmapped_file {
127 struct saved_mmapped_file *next;
128 struct failtest_call *opener;
129 struct contents_saved *s;
132 static struct saved_mmapped_file *saved_mmapped_files;
135 #include <execinfo.h>
137 static void **get_backtrace(unsigned int *num)
139 static unsigned int max_back = 100;
143 ret = malloc(max_back * sizeof(void *));
144 *num = backtrace(ret, max_back);
145 if (*num == max_back) {
153 /* This will test slightly less, since will consider all of the same
154 * calls as identical. But, it's slightly faster! */
155 static void **get_backtrace(unsigned int *num)
160 #endif /* HAVE_BACKTRACE */
162 static struct failtest_call *add_history_(enum failtest_call_type type,
169 struct failtest_call *call;
171 /* NULL file is how we suppress failure. */
173 return &unrecorded_call;
175 call = malloc(sizeof *call);
177 call->can_leak = can_leak;
180 call->cleanup = NULL;
181 call->backtrace = get_backtrace(&call->backtrace_num);
183 memcpy(&call->u, elem, elem_size);
184 tlist_add_tail(&history, call, list);
188 #define add_history(type, can_leak, file, line, elem) \
189 add_history_((type), (can_leak), (file), (line), (elem), sizeof(*(elem)))
191 /* We do a fake call inside a sizeof(), to check types. */
192 #define set_cleanup(call, clean, type) \
193 (call)->cleanup = (void *)((void)sizeof(clean((type *)NULL, false),1), (clean))
195 /* Dup the fd to a high value (out of the way I hope!), and close the old fd. */
196 static int move_fd_to_high(int fd)
202 if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
204 printf("Max is %i\n", max);
208 for (i = max - 1; i > fd; i--) {
209 if (fcntl(i, F_GETFL) == -1 && errno == EBADF) {
210 if (dup2(fd, i) == -1) {
211 warn("Failed to dup fd %i to %i", fd, i);
218 /* Nothing? Really? Er... ok? */
222 static bool read_write_info(int fd)
224 struct write_call *w;
227 /* We don't need all of this, but it's simple. */
228 child_writes = realloc(child_writes,
229 (child_writes_num+1) * sizeof(child_writes[0]));
230 w = &child_writes[child_writes_num];
231 if (!read_all(fd, w, sizeof(*w)))
234 w->buf = buf = malloc(w->count);
235 if (!read_all(fd, buf, w->count))
242 static char *failpath_string(void)
244 struct failtest_call *i;
245 char *ret = strdup("");
248 /* Inefficient, but who cares? */
249 tlist_for_each(&history, i, list) {
250 ret = realloc(ret, len + 2);
251 ret[len] = info_to_arg[i->type];
253 ret[len] = toupper(ret[len]);
259 static void do_warn(int e, const char *fmt, va_list ap)
261 char *p = failpath_string();
263 vfprintf(warnf, fmt, ap);
265 fprintf(warnf, ": %s", strerror(e));
266 fprintf(warnf, " [%s]\n", p);
270 static void fwarn(const char *fmt, ...)
281 static void fwarnx(const char *fmt, ...)
286 do_warn(-1, fmt, ap);
290 static void tell_parent(enum info_type type)
292 if (control_fd != -1)
293 write_all(control_fd, &type, sizeof(type));
296 static void child_fail(const char *out, size_t outlen, const char *fmt, ...)
299 char *path = failpath_string();
302 vfprintf(stderr, fmt, ap);
305 fprintf(stderr, "%.*s", (int)outlen, out);
306 printf("To reproduce: --failpath=%s\n", path);
308 tell_parent(FAILURE);
312 static void PRINTF_FMT(1, 2) trace(const char *fmt, ...)
322 for (i = 0; i < traceindent; i++)
323 fprintf(tracef, " ");
325 p = failpath_string();
326 fprintf(tracef, "%i: %u: %s ", idx++, getpid(), p);
328 vfprintf(tracef, fmt, ap);
335 static void hand_down(int signum)
340 static void release_locks(void)
342 /* Locks were never acquired/reacquired? */
346 /* We own them? Release them all. */
347 if (lock_owner == getpid()) {
351 fl.l_whence = SEEK_SET;
355 trace("Releasing %u locks\n", lock_num);
356 for (i = 0; i < lock_num; i++)
357 fcntl(locks[i].fd, F_SETLK, &fl);
359 /* Our parent must have them; pass request up. */
360 enum info_type type = RELEASE_LOCKS;
361 assert(control_fd != -1);
362 write_all(control_fd, &type, sizeof(type));
367 /* off_t is a signed type. Getting its max is non-trivial. */
368 static off_t off_max(void)
370 BUILD_ASSERT(sizeof(off_t) == 4 || sizeof(off_t) == 8);
371 if (sizeof(off_t) == 4)
372 return (off_t)0x7FFFFFF;
374 return (off_t)0x7FFFFFFFFFFFFFFULL;
377 static void get_locks(void)
382 if (lock_owner == getpid())
385 if (lock_owner != 0) {
386 enum info_type type = RELEASE_LOCKS;
387 assert(control_fd != -1);
388 trace("Asking parent to release locks\n");
389 write_all(control_fd, &type, sizeof(type));
392 fl.l_whence = SEEK_SET;
394 for (i = 0; i < lock_num; i++) {
395 fl.l_type = locks[i].type;
396 fl.l_start = locks[i].start;
397 if (locks[i].end == off_max())
400 fl.l_len = locks[i].end - locks[i].start + 1;
402 if (fcntl(locks[i].fd, F_SETLKW, &fl) != 0)
405 trace("Acquired %u locks\n", lock_num);
406 lock_owner = getpid();
410 static struct contents_saved *save_contents(const char *filename,
411 int fd, size_t count, off_t off,
414 struct contents_saved *s = malloc(sizeof(*s) + count);
419 ret = pread(fd, s->contents, count, off);
421 fwarn("failtest_write: failed to save old contents!");
426 /* Use lseek to get the size of file, but we have to restore
428 off = lseek(fd, 0, SEEK_CUR);
429 s->old_len = lseek(fd, 0, SEEK_END);
430 lseek(fd, off, SEEK_SET);
432 trace("Saving %p %s %zu@%llu after %s (filelength %llu) via fd %i\n",
433 s, filename, s->count, (long long)s->off, why,
434 (long long)s->old_len, fd);
438 static void restore_contents(struct failtest_call *opener,
439 struct contents_saved *s,
445 /* The top parent doesn't need to restore. */
446 if (control_fd == -1)
449 /* Has the fd been closed? */
450 if (opener->u.open.closed) {
451 /* Reopen, replace fd, close silently as we clean up. */
452 fd = open(opener->u.open.pathname, O_RDWR);
454 fwarn("failtest: could not reopen %s to clean up %s!",
455 opener->u.open.pathname, caller);
458 /* Make it clearly distinguisable from a "normal" fd. */
459 fd = move_fd_to_high(fd);
460 trace("Reopening %s to restore it (was fd %i, now %i)\n",
461 opener->u.open.pathname, opener->u.open.ret, fd);
462 opener->u.open.ret = fd;
463 opener->u.open.closed = false;
465 fd = opener->u.open.ret;
467 trace("Restoring %p %s %zu@%llu after %s (filelength %llu) via fd %i\n",
468 s, opener->u.open.pathname, s->count, (long long)s->off, caller,
469 (long long)s->old_len, fd);
470 if (pwrite(fd, s->contents, s->count, s->off) != s->count) {
471 fwarn("failtest: write failed cleaning up %s for %s!",
472 opener->u.open.pathname, caller);
475 if (ftruncate(fd, s->old_len) != 0) {
476 fwarn("failtest_write: truncate failed cleaning up %s for %s!",
477 opener->u.open.pathname, caller);
480 if (restore_offset) {
481 trace("Restoring offset of fd %i to %llu\n",
482 fd, (long long)s->off);
483 lseek(fd, s->off, SEEK_SET);
487 /* We save/restore most things on demand, but always do mmaped files. */
488 static void save_mmapped_files(void)
490 struct failtest_call *i;
491 trace("Saving mmapped files in child\n");
493 tlist_for_each_rev(&history, i, list) {
494 struct mmap_call *m = &i->u.mmap;
495 struct saved_mmapped_file *s;
497 if (i->type != FAILTEST_MMAP)
500 /* FIXME: We only handle mmapped files where fd is still open. */
501 if (m->opener->u.open.closed)
504 s = malloc(sizeof *s);
505 s->s = save_contents(m->opener->u.open.pathname,
506 m->fd, m->length, m->offset,
507 "mmapped file before fork");
508 s->opener = m->opener;
509 s->next = saved_mmapped_files;
510 saved_mmapped_files = s;
514 static void free_mmapped_files(bool restore)
516 trace("%s mmapped files in child\n",
517 restore ? "Restoring" : "Discarding");
518 while (saved_mmapped_files) {
519 struct saved_mmapped_file *next = saved_mmapped_files->next;
521 restore_contents(saved_mmapped_files->opener,
522 saved_mmapped_files->s, false,
524 free(saved_mmapped_files->s);
525 free(saved_mmapped_files);
526 saved_mmapped_files = next;
530 /* Returns a FAILTEST_OPEN, FAILTEST_PIPE or NULL. */
531 static struct failtest_call *opener_of(int fd)
533 struct failtest_call *i;
535 /* Don't get confused and match genuinely failed opens. */
539 /* Figure out the set of live fds. */
540 tlist_for_each_rev(&history, i, list) {
545 if (i->u.close.fd == fd) {
550 if (i->u.open.ret == fd) {
551 if (i->u.open.closed)
557 if (i->u.pipe.fds[0] == fd || i->u.pipe.fds[1] == fd) {
566 /* FIXME: socket, dup, etc are untracked! */
570 static void free_call(struct failtest_call *call)
572 /* We don't do this in cleanup: needed even for failed opens. */
573 if (call->type == FAILTEST_OPEN)
574 free((char *)call->u.open.pathname);
575 free(call->backtrace);
576 tlist_del_from(&history, call, list);
580 /* Free up memory, so valgrind doesn't report leaks. */
581 static void free_everything(void)
583 struct failtest_call *i;
585 while ((i = tlist_top(&history, list)) != NULL)
588 failtable_clear(&failtable);
591 static NORETURN void failtest_cleanup(bool forced_cleanup, int status)
593 struct failtest_call *i;
596 /* For children, we don't care if they "failed" the testing. */
597 if (control_fd != -1)
600 /* We don't restore contents for original parent. */
603 /* Cleanup everything, in reverse order. */
604 tlist_for_each_rev(&history, i, list) {
605 /* Don't restore things our parent did. */
606 if (i == our_history_start)
613 i->cleanup(&i->u, restore);
615 /* But their program shouldn't leak, even on failure. */
616 if (!forced_cleanup && i->can_leak) {
617 char *p = failpath_string();
618 printf("Leak at %s:%u: --failpath=%s\n",
619 i->file, i->line, p);
625 /* Put back mmaped files the way our parent (if any) expects. */
626 free_mmapped_files(true);
630 tell_parent(SUCCESS);
632 tell_parent(FAILURE);
636 static bool following_path(void)
640 /* + means continue after end, like normal. */
641 if (*failpath == '+') {
648 static bool follow_path(struct failtest_call *call)
650 if (*failpath == '\0') {
651 /* Continue, but don't inject errors. */
652 return call->fail = false;
655 if (tolower((unsigned char)*failpath) != info_to_arg[call->type])
656 errx(1, "Failpath expected '%s' got '%c'\n",
657 failpath, info_to_arg[call->type]);
658 call->fail = cisupper(*(failpath++));
660 call->can_leak = false;
664 static bool should_fail(struct failtest_call *call)
667 int control[2], output[2];
668 enum info_type type = UNEXPECTED;
671 struct failtest_call *dup;
673 if (call == &unrecorded_call)
676 if (following_path())
677 return follow_path(call);
679 /* Attach debugger if they asked for it. */
683 /* Pretend this last call matches whatever path wanted:
684 * keeps valgrind happy. */
685 call->fail = cisupper(debugpath[strlen(debugpath)-1]);
686 path = failpath_string();
688 if (streq(path, debugpath)) {
692 signal(SIGUSR1, SIG_IGN);
693 sprintf(str, "xterm -e gdb /proc/%d/exe %d &",
695 if (system(str) == 0)
698 /* Ignore last character: could be upper or lower. */
699 path[strlen(path)-1] = '\0';
700 if (!strstarts(debugpath, path)) {
702 "--debugpath not followed: %s\n", path);
709 /* Are we probing? If so, we never fail twice. */
711 trace("Not failing %c due to FAIL_PROBE return\n",
712 info_to_arg[call->type]);
713 return call->fail = false;
716 /* Don't fail more than once in the same place. */
717 dup = failtable_get(&failtable, call);
719 trace("Not failing %c due to duplicate\n",
720 info_to_arg[call->type]);
721 return call->fail = false;
725 switch (failtest_hook(&history)) {
732 trace("Not failing %c due to failhook return\n",
733 info_to_arg[call->type]);
741 /* Add it to our table of calls. */
742 failtable_add(&failtable, call);
744 /* We're going to fail in the child. */
746 if (pipe(control) != 0 || pipe(output) != 0)
747 err(1, "opening pipe");
749 /* Move out the way, to high fds. */
750 control[0] = move_fd_to_high(control[0]);
751 control[1] = move_fd_to_high(control[1]);
752 output[0] = move_fd_to_high(output[0]);
753 output[1] = move_fd_to_high(output[1]);
755 /* Prevent double-printing (in child and parent) */
762 err(1, "forking failed");
770 struct failtest_call *c;
772 c = tlist_tail(&history, list);
773 diff = time_between(time_now(), start);
774 failpath = failpath_string();
775 p = strrchr(c->file, '/');
780 trace("%u->%u (%u.%02u): %s (%s:%u)\n",
782 (int)diff.ts.tv_sec, (int)diff.ts.tv_nsec / 10000000,
783 failpath, p, c->line);
786 /* From here on, we have to clean up! */
787 our_history_start = tlist_tail(&history, list);
790 /* Don't swallow stderr if we're tracing. */
792 dup2(output[1], STDOUT_FILENO);
793 dup2(output[1], STDERR_FILENO);
794 if (output[1] != STDOUT_FILENO
795 && output[1] != STDERR_FILENO)
798 control_fd = move_fd_to_high(control[1]);
800 /* Forget any of our parent's saved files. */
801 free_mmapped_files(false);
803 /* Now, save any files we need to. */
804 save_mmapped_files();
806 /* Failed calls can't leak. */
807 call->can_leak = false;
812 signal(SIGUSR1, hand_down);
817 /* We grab output so we can display it; we grab writes so we
820 struct pollfd pfd[2];
823 pfd[0].fd = output[0];
824 pfd[0].events = POLLIN|POLLHUP;
825 pfd[1].fd = control[0];
826 pfd[1].events = POLLIN|POLLHUP;
829 ret = poll(pfd, 1, failtest_timeout_ms);
831 ret = poll(pfd, 2, failtest_timeout_ms);
838 err(1, "Poll returned %i", ret);
841 if (pfd[0].revents & POLLIN) {
844 out = realloc(out, outlen + 8192);
845 len = read(output[0], out + outlen, 8192);
847 } else if (type != SUCCESS && (pfd[1].revents & POLLIN)) {
848 if (read_all(control[0], &type, sizeof(type))) {
850 if (!read_write_info(control[0]))
852 } else if (type == RELEASE_LOCKS) {
854 /* FIXME: Tell them we're done... */
857 } else if (pfd[0].revents & POLLHUP) {
860 } while (type != FAILURE);
864 waitpid(child, &status, 0);
865 if (!WIFEXITED(status)) {
866 if (WTERMSIG(status) == SIGUSR1)
867 child_fail(out, outlen, "Timed out");
869 child_fail(out, outlen, "Killed by signal %u: ",
872 /* Child printed failure already, just pass up exit code. */
873 if (type == FAILURE) {
874 fprintf(stderr, "%.*s", (int)outlen, out);
876 exit(WEXITSTATUS(status) ? WEXITSTATUS(status) : 1);
878 if (WEXITSTATUS(status) != 0)
879 child_fail(out, outlen, "Exited with status %i: ",
880 WEXITSTATUS(status));
883 signal(SIGUSR1, SIG_DFL);
885 /* Only child does probe. */
888 /* We continue onwards without failing. */
893 static void cleanup_calloc(struct calloc_call *call, bool restore)
895 trace("undoing calloc %p\n", call->ret);
899 void *failtest_calloc(size_t nmemb, size_t size,
900 const char *file, unsigned line)
902 struct failtest_call *p;
903 struct calloc_call call;
906 p = add_history(FAILTEST_CALLOC, true, file, line, &call);
908 if (should_fail(p)) {
909 p->u.calloc.ret = NULL;
912 p->u.calloc.ret = calloc(nmemb, size);
913 set_cleanup(p, cleanup_calloc, struct calloc_call);
915 trace("calloc %zu x %zu %s:%u -> %p\n",
916 nmemb, size, file, line, p->u.calloc.ret);
918 return p->u.calloc.ret;
921 static void cleanup_malloc(struct malloc_call *call, bool restore)
923 trace("undoing malloc %p\n", call->ret);
927 void *failtest_malloc(size_t size, const char *file, unsigned line)
929 struct failtest_call *p;
930 struct malloc_call call;
933 p = add_history(FAILTEST_MALLOC, true, file, line, &call);
934 if (should_fail(p)) {
935 p->u.malloc.ret = NULL;
938 p->u.malloc.ret = malloc(size);
939 set_cleanup(p, cleanup_malloc, struct malloc_call);
941 trace("malloc %zu %s:%u -> %p\n",
942 size, file, line, p->u.malloc.ret);
944 return p->u.malloc.ret;
947 static void cleanup_realloc(struct realloc_call *call, bool restore)
949 trace("undoing realloc %p\n", call->ret);
953 /* Walk back and find out if we got this ptr from a previous routine. */
954 static void fixup_ptr_history(void *ptr, const char *why)
956 struct failtest_call *i;
958 /* Start at end of history, work back. */
959 tlist_for_each_rev(&history, i, list) {
961 case FAILTEST_REALLOC:
962 if (i->u.realloc.ret == ptr) {
963 trace("found realloc %p %s:%u matching %s\n",
964 ptr, i->file, i->line, why);
970 case FAILTEST_MALLOC:
971 if (i->u.malloc.ret == ptr) {
972 trace("found malloc %p %s:%u matching %s\n",
973 ptr, i->file, i->line, why);
979 case FAILTEST_CALLOC:
980 if (i->u.calloc.ret == ptr) {
981 trace("found calloc %p %s:%u matching %s\n",
982 ptr, i->file, i->line, why);
992 trace("Did not find %p matching %s\n", ptr, why);
995 void *failtest_realloc(void *ptr, size_t size, const char *file, unsigned line)
997 struct failtest_call *p;
998 struct realloc_call call;
1000 p = add_history(FAILTEST_REALLOC, true, file, line, &call);
1002 /* FIXME: Try one child moving allocation, one not. */
1003 if (should_fail(p)) {
1004 p->u.realloc.ret = NULL;
1007 /* Don't catch this one in the history fixup... */
1008 p->u.realloc.ret = NULL;
1009 fixup_ptr_history(ptr, "realloc");
1010 p->u.realloc.ret = realloc(ptr, size);
1011 set_cleanup(p, cleanup_realloc, struct realloc_call);
1013 trace("realloc %p %s:%u -> %p\n",
1014 ptr, file, line, p->u.realloc.ret);
1016 return p->u.realloc.ret;
1019 /* FIXME: Record free, so we can terminate fixup_ptr_history correctly.
1020 * If there's an alloc we don't see, it could get confusing if it matches
1021 * a previous allocation we did see. */
1022 void failtest_free(void *ptr)
1024 fixup_ptr_history(ptr, "free");
1025 trace("free %p\n", ptr);
1030 static struct contents_saved *save_file(const char *pathname)
1033 struct contents_saved *s;
1035 fd = open(pathname, O_RDONLY);
1039 s = save_contents(pathname, fd, lseek(fd, 0, SEEK_END), 0,
1040 "open with O_TRUNC");
1045 /* Optimization: don't create a child for an open which *we know*
1046 * would fail anyway. */
1047 static bool open_would_fail(const char *pathname, int flags)
1049 if ((flags & O_ACCMODE) == O_RDONLY)
1050 return access(pathname, R_OK) != 0;
1051 if (!(flags & O_CREAT)) {
1052 if ((flags & O_ACCMODE) == O_WRONLY)
1053 return access(pathname, W_OK) != 0;
1054 if ((flags & O_ACCMODE) == O_RDWR)
1055 return access(pathname, W_OK) != 0
1056 || access(pathname, R_OK) != 0;
1058 /* FIXME: We could check if it exists, for O_CREAT|O_EXCL */
1062 static void cleanup_open(struct open_call *call, bool restore)
1064 if (restore && call->saved)
1065 restore_contents(container_of(call, struct failtest_call,
1067 call->saved, false, "open with O_TRUNC");
1068 if (!call->closed) {
1069 trace("Cleaning up open %s by closing fd %i\n",
1070 call->pathname, call->ret);
1072 call->closed = true;
1077 int failtest_open(const char *pathname,
1078 const char *file, unsigned line, ...)
1080 struct failtest_call *p;
1081 struct open_call call;
1084 call.pathname = strdup(pathname);
1086 call.flags = va_arg(ap, int);
1087 call.always_save = false;
1088 call.closed = false;
1089 if (call.flags & O_CREAT) {
1090 call.mode = va_arg(ap, int);
1093 p = add_history(FAILTEST_OPEN, true, file, line, &call);
1094 /* Avoid memory leak! */
1095 if (p == &unrecorded_call)
1096 free((char *)call.pathname);
1098 if (should_fail(p)) {
1099 /* Don't bother inserting failures that would happen anyway. */
1100 if (open_would_fail(pathname, call.flags)) {
1101 trace("Open would have failed anyway: stopping\n");
1102 failtest_cleanup(true, 0);
1105 /* FIXME: Play with error codes? */
1108 /* Save the old version if they're truncating it. */
1109 if (call.flags & O_TRUNC)
1110 p->u.open.saved = save_file(pathname);
1112 p->u.open.saved = NULL;
1113 p->u.open.ret = open(pathname, call.flags, call.mode);
1114 if (p->u.open.ret == -1) {
1115 p->u.open.closed = true;
1116 p->can_leak = false;
1118 set_cleanup(p, cleanup_open, struct open_call);
1121 trace("open %s %s:%u -> %i (opener %p)\n",
1122 pathname, file, line, p->u.open.ret, &p->u.open);
1124 return p->u.open.ret;
1127 static void cleanup_mmap(struct mmap_call *mmap, bool restore)
1129 trace("cleaning up mmap @%p (opener %p)\n",
1130 mmap->ret, mmap->opener);
1132 restore_contents(mmap->opener, mmap->saved, false, "mmap");
1136 void *failtest_mmap(void *addr, size_t length, int prot, int flags,
1137 int fd, off_t offset, const char *file, unsigned line)
1139 struct failtest_call *p;
1140 struct mmap_call call;
1143 call.length = length;
1146 call.offset = offset;
1148 call.opener = opener_of(fd);
1150 /* If we don't know what file it was, don't fail. */
1153 fwarnx("failtest_mmap: couldn't figure out source for"
1154 " fd %i at %s:%u", fd, file, line);
1156 addr = mmap(addr, length, prot, flags, fd, offset);
1157 trace("mmap of fd %i -> %p (opener = NULL)\n", fd, addr);
1161 p = add_history(FAILTEST_MMAP, false, file, line, &call);
1162 if (should_fail(p)) {
1163 p->u.mmap.ret = MAP_FAILED;
1166 p->u.mmap.ret = mmap(addr, length, prot, flags, fd, offset);
1167 /* Save contents if we're writing to a normal file */
1168 if (p->u.mmap.ret != MAP_FAILED
1169 && (prot & PROT_WRITE)
1170 && call.opener->type == FAILTEST_OPEN) {
1171 const char *fname = call.opener->u.open.pathname;
1172 p->u.mmap.saved = save_contents(fname, fd, length,
1173 offset, "being mmapped");
1174 set_cleanup(p, cleanup_mmap, struct mmap_call);
1177 trace("mmap of fd %i %s:%u -> %p (opener = %p)\n",
1178 fd, file, line, addr, call.opener);
1180 return p->u.mmap.ret;
1183 /* Since OpenBSD can't handle adding args, we use this file and line.
1184 * This will make all mmaps look the same, reducing coverage. */
1185 void *failtest_mmap_noloc(void *addr, size_t length, int prot, int flags,
1186 int fd, off_t offset)
1188 return failtest_mmap(addr, length, prot, flags, fd, offset,
1189 __FILE__, __LINE__);
1192 static void cleanup_pipe(struct pipe_call *call, bool restore)
1194 trace("cleaning up pipe fd=%i%s,%i%s\n",
1195 call->fds[0], call->closed[0] ? "(already closed)" : "",
1196 call->fds[1], call->closed[1] ? "(already closed)" : "");
1197 if (!call->closed[0])
1198 close(call->fds[0]);
1199 if (!call->closed[1])
1200 close(call->fds[1]);
1203 int failtest_pipe(int pipefd[2], const char *file, unsigned line)
1205 struct failtest_call *p;
1207 p = add_history_(FAILTEST_PIPE, true, file, line, NULL, 0);
1208 if (should_fail(p)) {
1210 /* FIXME: Play with error codes? */
1213 p->u.pipe.ret = pipe(p->u.pipe.fds);
1214 p->u.pipe.closed[0] = p->u.pipe.closed[1] = false;
1215 set_cleanup(p, cleanup_pipe, struct pipe_call);
1218 trace("pipe %s:%u -> %i,%i\n", file, line,
1219 p->u.pipe.ret ? -1 : p->u.pipe.fds[0],
1220 p->u.pipe.ret ? -1 : p->u.pipe.fds[1]);
1222 /* This causes valgrind to notice if they use pipefd[] after failure */
1223 memcpy(pipefd, p->u.pipe.fds, sizeof(p->u.pipe.fds));
1225 return p->u.pipe.ret;
1228 static void cleanup_read(struct read_call *call, bool restore)
1231 trace("cleaning up read on fd %i: seeking to %llu\n",
1232 call->fd, (long long)call->off);
1234 /* Read (not readv!) moves file offset! */
1235 if (lseek(call->fd, call->off, SEEK_SET) != call->off) {
1236 fwarn("Restoring lseek pointer failed (read)");
1241 static ssize_t failtest_add_read(int fd, void *buf, size_t count, off_t off,
1242 bool is_pread, const char *file, unsigned line)
1244 struct failtest_call *p;
1245 struct read_call call;
1250 p = add_history(FAILTEST_READ, false, file, line, &call);
1252 /* FIXME: Try partial read returns. */
1253 if (should_fail(p)) {
1258 p->u.read.ret = pread(fd, buf, count, off);
1260 p->u.read.ret = read(fd, buf, count);
1261 if (p->u.read.ret != -1)
1262 set_cleanup(p, cleanup_read, struct read_call);
1265 trace("%sread %s:%u fd %i %zu@%llu -> %zi\n",
1266 is_pread ? "p" : "", file, line, fd, count, (long long)off,
1269 return p->u.read.ret;
1272 static void cleanup_write(struct write_call *write, bool restore)
1274 trace("cleaning up write on %s\n", write->opener->u.open.pathname);
1276 restore_contents(write->opener, write->saved, !write->is_pwrite,
1281 static ssize_t failtest_add_write(int fd, const void *buf,
1282 size_t count, off_t off,
1284 const char *file, unsigned line)
1286 struct failtest_call *p;
1287 struct write_call call;
1293 call.is_pwrite = is_pwrite;
1294 call.opener = opener_of(fd);
1295 p = add_history(FAILTEST_WRITE, false, file, line, &call);
1297 /* If we're a child, we need to make sure we write the same thing
1298 * to non-files as the parent does, so tell it. */
1299 if (control_fd != -1 && off == (off_t)-1) {
1300 enum info_type type = WRITE;
1302 write_all(control_fd, &type, sizeof(type));
1303 write_all(control_fd, &p->u.write, sizeof(p->u.write));
1304 write_all(control_fd, buf, count);
1307 /* FIXME: Try partial write returns. */
1308 if (should_fail(p)) {
1309 p->u.write.ret = -1;
1313 assert(call.opener == p->u.write.opener);
1315 if (p->u.write.opener) {
1316 is_file = (p->u.write.opener->type == FAILTEST_OPEN);
1318 /* We can't unwind it, so at least check same
1319 * in parent and child. */
1323 /* FIXME: We assume same write order in parent and child */
1324 if (!is_file && child_writes_num != 0) {
1325 if (child_writes[0].fd != fd)
1326 errx(1, "Child wrote to fd %u, not %u?",
1327 child_writes[0].fd, fd);
1328 if (child_writes[0].off != p->u.write.off)
1329 errx(1, "Child wrote to offset %zu, not %zu?",
1330 (size_t)child_writes[0].off,
1331 (size_t)p->u.write.off);
1332 if (child_writes[0].count != count)
1333 errx(1, "Child wrote length %zu, not %zu?",
1334 child_writes[0].count, count);
1335 if (memcmp(child_writes[0].buf, buf, count)) {
1337 "Child wrote differently to"
1338 " fd %u than we did!\n", fd);
1340 free((char *)child_writes[0].buf);
1342 memmove(&child_writes[0], &child_writes[1],
1343 sizeof(child_writes[0]) * child_writes_num);
1345 /* Child wrote it already. */
1346 trace("write %s:%i on fd %i already done by child\n",
1348 p->u.write.ret = count;
1350 return p->u.write.ret;
1354 p->u.write.saved = save_contents(call.opener->u.open.pathname,
1356 "being overwritten");
1357 set_cleanup(p, cleanup_write, struct write_call);
1360 /* Though off is current seek ptr for write case, we need to
1361 * move it. write() does that for us. */
1362 if (p->u.write.is_pwrite)
1363 p->u.write.ret = pwrite(fd, buf, count, off);
1365 p->u.write.ret = write(fd, buf, count);
1367 trace("%swrite %s:%i %zu@%llu on fd %i -> %zi\n",
1368 p->u.write.is_pwrite ? "p" : "",
1369 file, line, count, (long long)off, fd, p->u.write.ret);
1371 return p->u.write.ret;
1374 ssize_t failtest_pwrite(int fd, const void *buf, size_t count, off_t offset,
1375 const char *file, unsigned line)
1377 return failtest_add_write(fd, buf, count, offset, true, file, line);
1380 ssize_t failtest_write(int fd, const void *buf, size_t count,
1381 const char *file, unsigned line)
1383 return failtest_add_write(fd, buf, count, lseek(fd, 0, SEEK_CUR), false,
1387 ssize_t failtest_pread(int fd, void *buf, size_t count, off_t off,
1388 const char *file, unsigned line)
1390 return failtest_add_read(fd, buf, count, off, true, file, line);
1393 ssize_t failtest_read(int fd, void *buf, size_t count,
1394 const char *file, unsigned line)
1396 return failtest_add_read(fd, buf, count, lseek(fd, 0, SEEK_CUR), false,
1400 static struct lock_info *WARN_UNUSED_RESULT
1401 add_lock(struct lock_info *locks, int fd, off_t start, off_t end, int type)
1404 struct lock_info *l;
1406 for (i = 0; i < lock_num; i++) {
1411 /* Four cases we care about:
1425 if (start > l->start && end < l->end) {
1426 /* Mid overlap: trim entry, add new one. */
1427 off_t new_start, new_end;
1428 new_start = end + 1;
1430 trace("splitting lock on fd %i from %llu-%llu"
1432 fd, (long long)l->start, (long long)l->end,
1433 (long long)l->start, (long long)start - 1);
1435 locks = add_lock(locks,
1436 fd, new_start, new_end, l->type);
1438 } else if (start <= l->start && end >= l->end) {
1439 /* Total overlap: eliminate entry. */
1440 trace("erasing lock on fd %i %llu-%llu\n",
1441 fd, (long long)l->start, (long long)l->end);
1444 } else if (end >= l->start && end < l->end) {
1445 trace("trimming lock on fd %i from %llu-%llu"
1447 fd, (long long)l->start, (long long)l->end,
1448 (long long)end + 1, (long long)l->end);
1449 /* Start overlap: trim entry. */
1451 } else if (start > l->start && start <= l->end) {
1452 trace("trimming lock on fd %i from %llu-%llu"
1454 fd, (long long)l->start, (long long)l->end,
1455 (long long)l->start, (long long)start - 1);
1456 /* End overlap: trim entry. */
1459 /* Nothing left? Remove it. */
1460 if (l->end < l->start) {
1461 trace("forgetting lock on fd %i\n", fd);
1462 memmove(l, l + 1, (--lock_num - i) * sizeof(l[0]));
1467 if (type != F_UNLCK) {
1468 locks = realloc(locks, (lock_num + 1) * sizeof(*locks));
1469 l = &locks[lock_num++];
1474 trace("new lock on fd %i %llu-%llu\n",
1475 fd, (long long)l->start, (long long)l->end);
1480 /* We trap this so we can record it: we don't fail it. */
/*
 * failtest_close - record a close(2) in the failtest history and undo its
 * side effects for the parent.
 *
 * Visible responsibilities (NOTE(review): several original lines are elided
 * from this excerpt, so brace structure and the final return are not shown):
 *  - look up the history entry that opened this fd (opener_of) BEFORE
 *    adding this close to the history;
 *  - append a FAILTEST_CLOSE entry (never failed: should_fail is not
 *    consulted here, only the failpath close token is consumed);
 *  - mark the opener (pipe or open) as closed so leak detection passes;
 *  - if we are a child (control_fd != -1), walk our own history backwards
 *    and restore the shared file offset changed by lseek/write/read on this
 *    fd, since the parent shares the open file description;
 *  - drop every advisory lock held on the fd (close releases locks).
 */
1481 int failtest_close(int fd, const char *file, unsigned line)
1483 	struct close_call call;
1484 	struct failtest_call *p, *opener;
1486 	/* Do this before we add ourselves to history! */
1487 	opener = opener_of(fd);
1490 	p = add_history(FAILTEST_CLOSE, false, file, line, &call);
1493 	/* Consume close from failpath (shouldn't tell us to fail). */
1494 	if (following_path()) {
1499 	trace("close on fd %i\n", fd);
1503 	/* Mark opener as not leaking, remove its cleanup function. */
1505 	trace("close on fd %i found opener %p\n", fd, opener);
1506 	if (opener->type == FAILTEST_PIPE) {
	/* A pipe entry owns two fds; record which end was closed. */
1508 	if (opener->u.pipe.fds[0] == fd) {
1509 	assert(!opener->u.pipe.closed[0]);
1510 	opener->u.pipe.closed[0] = true;
1511 	} else if (opener->u.pipe.fds[1] == fd) {
1512 	assert(!opener->u.pipe.closed[1]);
1513 	opener->u.pipe.closed[1] = true;
	/* Pipe still "leaks" while either end remains open. */
1516 	opener->can_leak = (!opener->u.pipe.closed[0]
1517 	|| !opener->u.pipe.closed[1]);
1518 	} else if (opener->type == FAILTEST_OPEN) {
1519 	opener->u.open.closed = true;
1520 	opener->can_leak = false;
1525 	/* Restore offset now, in case parent shared (can't do after close!). */
1526 	if (control_fd != -1) {
1527 	struct failtest_call *i;
	/* Walk newest-to-oldest, stopping at the first call this child made
	 * itself (our_history_start) — earlier entries belong to the parent. */
1529 	tlist_for_each_rev(&history, i, list) {
1530 	if (i == our_history_start)
1534 	if (i->type == FAILTEST_LSEEK && i->u.lseek.fd == fd) {
1535 	trace("close on fd %i undoes lseek\n", fd);
1536 	/* This seeks back. */
1537 	i->cleanup(&i->u, true);
1539 	} else if (i->type == FAILTEST_WRITE
1540 	&& i->u.write.fd == fd
1541 	&& !i->u.write.is_pwrite) {
1542 	trace("close on fd %i undoes write"
1543 	" offset change\n", fd);
1544 	/* Write (not pwrite!) moves file offset! */
1545 	if (lseek(fd, i->u.write.off, SEEK_SET)
1546 	!= i->u.write.off) {
1547 	fwarn("Restoring lseek pointer failed (write)");
1549 	} else if (i->type == FAILTEST_READ
1550 	&& i->u.read.fd == fd) {
1551 	/* preads don't *have* cleanups */
	/* NOTE(review): a guard against a NULL cleanup is presumably in an
	 * elided line here — the comment above implies pread entries have
	 * i->cleanup == NULL; confirm against the full source. */
1553 	trace("close on fd %i undoes read"
1554 	" offset change\n", fd);
1555 	/* This seeks back. */
1556 	i->cleanup(&i->u, true);
1563 	/* Close unlocks everything. */
1564 	locks = add_lock(locks, fd, 0, off_max(), F_UNLCK);
1568 /* Zero length means "to end of file" */
/*
 * end_of - convert a struct flock (start, len) pair into an inclusive end
 * offset for the lock table (which cannot represent 0-byte locks).
 * NOTE(review): the len == 0 branch ("to end of file", presumably returning
 * off_max()) is elided from this excerpt; only the non-zero-length case is
 * visible below.
 */
1569 static off_t end_of(off_t start, off_t len)
	/* Inclusive end: a lock of len bytes covers [start, start + len - 1]. */
1573 	return start + len - 1;
1576 /* FIXME: This only handles locks, really. */
/*
 * failtest_fcntl - wrapper for fcntl(2) used by the failtest framework.
 *
 * Variadic-argument extraction depends on cmd (NOTE(review): the switch
 * labels, va_start/va_end, and several branches are elided in this excerpt):
 *  - F_SETFL/F_SETFD take a long and are passed straight through, untracked;
 *  - F_GETFL/F_GETFD take no argument and are passed straight through;
 *  - F_GETLK copies the caller's struct flock and passes through;
 *  - F_SETLK/F_SETLKW copy the struct flock and fall into the tracked path;
 *  - any other cmd aborts via err(), forcing this wrapper to be extended.
 * Tracked lock calls are added to the history; should_fail() may make the
 * call fail artificially (ret = -1), otherwise the real fcntl runs and,
 * on success, the lock table is updated via add_lock().
 */
1577 int failtest_fcntl(int fd, const char *file, unsigned line, int cmd, ...)
1579 	struct failtest_call *p;
1580 	struct fcntl_call call;
1586 	/* Argument extraction. */
1591 	call.arg.l = va_arg(ap, long);
1593 	trace("fcntl on fd %i F_SETFL/F_SETFD\n", fd);
	/* Flag get/set calls are not failure candidates: pass through. */
1594 	return fcntl(fd, cmd, call.arg.l);
1597 	trace("fcntl on fd %i F_GETFL/F_GETFD\n", fd);
1598 	return fcntl(fd, cmd);
1600 	trace("fcntl on fd %i F_GETLK\n", fd);
1603 	call.arg.fl = *va_arg(ap, struct flock *);
1605 	return fcntl(fd, cmd, &call.arg.fl);
1608 	trace("fcntl on fd %i F_SETLK%s\n",
1609 	fd, cmd == F_SETLKW ? "W" : "");
1611 	call.arg.fl = *va_arg(ap, struct flock *);
1615 	/* This means you need to implement it here. */
1616 	err(1, "failtest: unknown fcntl %u", cmd);
1619 	p = add_history(FAILTEST_FCNTL, false, file, line, &call);
1621 	if (should_fail(p)) {
	/* Simulated failure: no real fcntl is issued. NOTE(review): the errno
	 * chosen for the F_SETLK vs F_SETLKW cases is in elided lines. */
1622 	p->u.fcntl.ret = -1;
1623 	if (p->u.fcntl.cmd == F_SETLK)
1629 	p->u.fcntl.ret = fcntl(p->u.fcntl.fd, p->u.fcntl.cmd,
1630 	&p->u.fcntl.arg.fl);
1631 	if (p->u.fcntl.ret == -1)
1634 	/* We don't handle anything else yet. */
1635 	assert(p->u.fcntl.arg.fl.l_whence == SEEK_SET);
	/* Successful lock change: mirror it in our lock table so the parent
	 * can detect conflicts across simulated children. */
1636 	locks = add_lock(locks,
1638 	p->u.fcntl.arg.fl.l_start,
1639 	end_of(p->u.fcntl.arg.fl.l_start,
1640 	p->u.fcntl.arg.fl.l_len),
1641 	p->u.fcntl.arg.fl.l_type);
1644 	trace("fcntl on fd %i -> %i\n", fd, p->u.fcntl.ret);
1646 	return p->u.fcntl.ret;
/*
 * cleanup_lseek - history-entry cleanup: seek the fd back to the offset it
 * had before the recorded lseek (call->old_off).
 * NOTE(review): braces and the use of the 'restore' flag are in elided
 * lines; presumably the seek only happens when restore is true — confirm
 * against the full source.
 */
1649 static void cleanup_lseek(struct lseek_call *call, bool restore)
1652 	trace("cleaning up lseek on fd %i -> %llu\n",
1653 	call->fd, (long long)call->old_off);
	/* Failure to restore is only warned about, never fatal. */
1654 	if (lseek(call->fd, call->old_off, SEEK_SET) != call->old_off)
1655 	fwarn("Restoring lseek pointer failed");
1659 /* We trap this so we can undo it: we don't fail it. */
/*
 * failtest_lseek - wrapper for lseek(2): records the call (with the offset
 * prior to the seek) so a child's seek can later be undone for the parent,
 * which shares the open file description. This call is never failed
 * artificially; the failpath token is merely consumed.
 * NOTE(review): the signature continuation line (unsigned line parameter)
 * and the following_path() branch body are elided from this excerpt.
 */
1660 off_t failtest_lseek(int fd, off_t offset, int whence, const char *file,
1663 	struct failtest_call *p;
1664 	struct lseek_call call;
1666 	call.offset = offset;
1667 	call.whence = whence;
	/* Snapshot the current offset first so cleanup_lseek can restore it. */
1668 	call.old_off = lseek(fd, 0, SEEK_CUR);
1670 	p = add_history(FAILTEST_LSEEK, false, file, line, &call);
1673 	/* Consume lseek from failpath. */
1678 	p->u.lseek.ret = lseek(fd, offset, whence);
	/* Only register the undo hook if the seek actually succeeded. */
1680 	if (p->u.lseek.ret != (off_t)-1)
1681 	set_cleanup(p, cleanup_lseek, struct lseek_call);
1683 	trace("lseek %s:%u on fd %i from %llu to %llu%s\n",
1684 	file, line, fd, (long long)call.old_off, (long long)offset,
1685 	whence == SEEK_CUR ? " (from current off)" :
1686 	whence == SEEK_END ? " (from end)" :
1687 	whence == SEEK_SET ? "" : " (invalid whence)");
1688 	return p->u.lseek.ret;
/*
 * failtest_getpid - wrapper for getpid(2).
 * NOTE(review): the body is elided from this excerpt; the visible comment
 * implies it returns a pid cached by failtest_init (orig_pid is assigned
 * there), hence the init-order requirement below.
 */
1692 pid_t failtest_getpid(const char *file, unsigned line)
1694 	/* You must call failtest_init first! */
/*
 * failtest_init - one-time setup for the failtest framework.
 *
 * Records the original process pid, duplicates stderr onto a high fd for
 * warnings (so the test's own fds stay undisturbed), parses the
 * framework's command-line options (--failpath=, --trace, --debugpath=),
 * and initialises the duplicate-detection hash table.
 * NOTE(review): variable declarations, the trace-stream setup inside the
 * --trace branch, and any unknown-option handling are elided from this
 * excerpt.
 */
1699 void failtest_init(int argc, char *argv[])
1703 	orig_pid = getpid();
	/* move_fd_to_high keeps the warning stream off low fd numbers the
	 * code under test is likely to use. */
1705 	warnf = fdopen(move_fd_to_high(dup(STDERR_FILENO)), "w");
1706 	for (i = 1; i < argc; i++) {
1707 	if (!strncmp(argv[i], "--failpath=", strlen("--failpath="))) {
1708 	failpath = argv[i] + strlen("--failpath=");
1709 	} else if (strcmp(argv[i], "--trace") == 0) {
	/* Tracing disables the child timeout (-1 on an unsigned wraps to
	 * "effectively infinite"; see failtest_timeout_ms in the header). */
1711 	failtest_timeout_ms = -1;
1712 	} else if (!strncmp(argv[i], "--debugpath=",
1713 	strlen("--debugpath="))) {
1714 	debugpath = argv[i] + strlen("--debugpath=");
1717 	failtable_init(&failtable);
/*
 * failtest_has_failed - true iff we are currently inside a forked failtest
 * child: control_fd is only set (!= -1) in children, per its declaration
 * comment at the top of the file.
 */
1721 bool failtest_has_failed(void)
1723 	return control_fd != -1;
/*
 * failtest_exit - terminate the test, running the user's optional exit
 * check (failtest_exit_check hook) against the full call history first,
 * then performing framework cleanup with the given exit status.
 * NOTE(review): the function's closing lines run past this excerpt;
 * failtest_cleanup presumably does not return — confirm against the full
 * source.
 */
1726 void failtest_exit(int status)
1728 	trace("failtest_exit with status %i\n", status);
1729 	if (failtest_exit_check) {
	/* A failing exit check is reported as a child failure. */
1730 	if (!failtest_exit_check(&history))
1731 	child_fail(NULL, 0, "failtest_exit_check failed\n");
1734 	failtest_cleanup(false, status);