1 /* Licensed under LGPL - see LICENSE file for details */
2 #include <ccan/failtest/failtest.h>
12 #include <sys/types.h>
19 #include <ccan/time/time.h>
20 #include <ccan/read_write_all/read_write_all.h>
21 #include <ccan/failtest/failtest_proto.h>
22 #include <ccan/build_assert/build_assert.h>
23 #include <ccan/hash/hash.h>
24 #include <ccan/htable/htable_type.h>
25 #include <ccan/str/str.h>
/* User-settable hook: consulted before a failure is injected; may override
 * the default decision.  NULL means "use default policy". */
27 enum failtest_result (*failtest_hook)(struct tlist_calls *);
/* fd for trace() output; -1 until tracing is enabled (--tracepath). */
29 static int tracefd = -1;
/* How long (ms) the parent waits on a failing child before assuming a hang. */
32 unsigned int failtest_timeout_ms = 20000;
/* Set by --debugpath=<path>: attach a debugger when the failpath matches. */
35 const char *debugpath;
47 /* end is inclusive: you can't have a 0-byte lock. */
52 /* We hash the call location together with its backtrace. */
/* Hash for the per-call-site dedup table: mixes file name and the raw
 * backtrace addresses.  (Some lines of this body are not visible here.) */
53 static size_t hash_call(const struct failtest_call *call)
55 	return hash(call->file, strlen(call->file),
57 		    hash(call->backtrace, call->backtrace_num,
/* Equality for the dedup table: two calls match only if file, line, type
 * and the full backtrace agree.  Used with hash_call by the htable below. */
61 static bool call_eq(const struct failtest_call *call1,
62 		    const struct failtest_call *call2)
66 	if (strcmp(call1->file, call2->file) != 0
67 	    || call1->line != call2->line
68 	    || call1->type != call2->type
69 	    || call1->backtrace_num != call2->backtrace_num)
/* Backtraces are compared address-by-address. */
72 	for (i = 0; i < call1->backtrace_num; i++)
73 		if (call1->backtrace[i] != call2->backtrace[i])
79 /* Defines struct failtable. */
80 HTABLE_DEFINE_TYPE(struct failtest_call, (struct failtest_call *), hash_call,
/* Optional user callback run at exit to validate the recorded history. */
83 bool (*failtest_exit_check)(struct tlist_calls *history);
/* Every intercepted call is appended to this list, in order. */
85 static struct tlist_calls history = TLIST_INIT(history);
/* Pipe to our parent; -1 in the original (root) process. */
86 static int control_fd = -1;
/* Wall-clock start time, used by trace() for relative timestamps. */
87 static struct timeval start;
88 static bool probing = false;
/* Table of call sites we have already failed once (see should_fail). */
89 static struct failtable failtable;
/* Queue of writes the child reported, replayed by the parent (pwrite). */
91 static struct write_call *child_writes = NULL;
92 static unsigned int child_writes_num = 0;
/* Which process currently holds the fcntl locks (passed down on fork). */
94 static pid_t lock_owner;
95 static struct lock_info *locks = NULL;
96 static unsigned int lock_num = 0;
98 static pid_t orig_pid;
/* Maps enum failtest_call_type to its one-letter failpath code. */
100 static const char info_to_arg[] = "mceoxprwfa";
102 /* Dummy call used for failtest_undo wrappers. */
103 static struct failtest_call unrecorded_call;
106 #include <execinfo.h>
/* Capture the current backtrace into a malloc'd array; *num gets the count.
 * NOTE(review): lines elided here — presumably the buffer is grown and
 * retried when backtrace() fills it (>= max_back); confirm in full source. */
108 static void **get_backtrace(unsigned int *num)
110 	static unsigned int max_back = 100;
114 	ret = malloc(max_back * sizeof(void *));
115 	*num = backtrace(ret, max_back);
116 	if (*num == max_back) {
124 /* This will test slightly less, since will consider all of the same
125  * calls as identical. But, it's slightly faster! */
/* Fallback when HAVE_BACKTRACE is not set: no backtrace recorded. */
126 static void **get_backtrace(unsigned int *num)
131 #endif /* HAVE_BACKTRACE */
/* Record an intercepted call in the history list.  Copies elem_size bytes of
 * the per-call-type argument struct into call->u.  Returns the new entry, or
 * &unrecorded_call when recording is suppressed (NULL file). */
133 static struct failtest_call *add_history_(enum failtest_call_type type,
139 	struct failtest_call *call;
141 	/* NULL file is how we suppress failure. */
143 		return &unrecorded_call;
145 	call = malloc(sizeof *call);
149 	call->cleanup = NULL;
150 	call->backtrace = get_backtrace(&call->backtrace_num);
/* Snapshot the type-specific arguments (union member selected by 'type'). */
151 	memcpy(&call->u, elem, elem_size);
152 	tlist_add_tail(&history, call, list);
/* Convenience wrapper: derives elem_size from the argument's type. */
156 #define add_history(type, file, line, elem) \
157 	add_history_((type), (file), (line), (elem), sizeof(*(elem)))
164 /* Dup the fd to a high value (out of the way I hope!), and close the old fd. */
/* Scans downward from FD_SETSIZE-1 for an unused descriptor (EBADF from
 * F_GETFL means free) and dup2()s onto it.  Lines elided: on success the
 * original fd is presumably closed and the new fd returned — confirm. */
165 static int move_fd_to_high(int fd)
169 	for (i = FD_SETSIZE - 1; i >= 0; i--) {
170 		if (fcntl(i, F_GETFL) == -1 && errno == EBADF) {
171 			if (dup2(fd, i) == -1)
172 				err(1, "Failed to dup fd %i to %i", fd, i);
177 	/* Nothing? Really? Er... ok? */
/* Parent side of the write protocol: read one struct write_call plus its
 * payload from the child's control fd and append it to child_writes[].
 * Returns false if the read fails (child gone). */
181 static bool read_write_info(int fd)
183 	struct write_call *w;
186 	/* We don't need all of this, but it's simple. */
187 	child_writes = realloc(child_writes,
188 			       (child_writes_num+1) * sizeof(child_writes[0]));
189 	w = &child_writes[child_writes_num];
190 	if (!read_all(fd, w, sizeof(*w)))
/* The struct's buf pointer is meaningless cross-process: re-read payload. */
193 	w->buf = buf = malloc(w->count);
194 	if (!read_all(fd, buf, w->count))
/* Build a malloc'd string describing the path taken so far: one letter per
 * recorded call (from info_to_arg), uppercased when that call was failed.
 * Caller frees. */
201 static char *failpath_string(void)
203 	struct failtest_call *i;
204 	char *ret = strdup("");
207 	/* Inefficient, but who cares? */
208 	tlist_for_each(&history, i, list) {
209 		ret = realloc(ret, len + 2);
210 		ret[len] = info_to_arg[i->type];
/* Uppercase marks "this call was made to fail". */
212 			ret[len] = toupper(ret[len]);
/* Core warning printer: message, optional strerror(e) when e >= 0 (callers
 * pass -1 to suppress it), then the current failpath in brackets.  Writes to
 * warnfd so output survives the stdout/stderr redirection of children. */
218 static void warn_via_fd(int e, const char *fmt, va_list ap)
220 	char *p = failpath_string();
222 	vdprintf(warnfd, fmt, ap);
224 		dprintf(warnfd, ": %s", strerror(e));
225 	dprintf(warnfd, " [%s]\n", p);
/* warn()-style: appends strerror(errno).  Saves errno before formatting. */
229 static void fwarn(const char *fmt, ...)
235 	warn_via_fd(e, fmt, ap);
/* warnx()-style: no errno string (passes -1). */
240 static void fwarnx(const char *fmt, ...)
245 	warn_via_fd(-1, fmt, ap);
/* Send a one-word status message up the control pipe (no-op in the root). */
249 static void tell_parent(enum info_type type)
251 	if (control_fd != -1)
252 		write_all(control_fd, &type, sizeof(type));
/* Report a child's failure: the formatted reason, the child's captured
 * output (out/outlen), and the --failpath needed to reproduce it.  Tells the
 * parent FAILURE.  (Exit path is in lines not shown here.) */
255 static void child_fail(const char *out, size_t outlen, const char *fmt, ...)
258 	char *path = failpath_string();
261 	vfprintf(stderr, fmt, ap);
264 	fprintf(stderr, "%.*s", (int)outlen, out);
265 	printf("To reproduce: --failpath=%s\n", path);
267 	tell_parent(FAILURE);
/* Write trace output to tracefd (enabled by --tracepath). */
271 static void trace(const char *fmt, ...)
279 		vdprintf(tracefd, fmt, ap);
/* SIGUSR1 handler installed in children: passes the signal further down. */
285 static void hand_down(int signum)
/* Drop all recorded fcntl locks so another process in the tree can take
 * them.  If we are not the owner, the request is forwarded to the parent. */
290 static void release_locks(void)
292 	/* Locks were never acquired/reacquired? */
296 	/* We own them? Release them all. */
297 	if (lock_owner == getpid()) {
301 		fl.l_whence = SEEK_SET;
/* Errors ignored: best-effort unlock of everything we recorded. */
305 		for (i = 0; i < lock_num; i++)
306 			fcntl(locks[i].fd, F_SETLK, &fl);
308 		/* Our parent must have them; pass request up. */
309 		enum info_type type = RELEASE_LOCKS;
310 		assert(control_fd != -1);
311 		write_all(control_fd, &type, sizeof(type));
/* off_t is a signed type.  Getting its max is non-trivial.
 *
 * Returns the maximum representable off_t.  Used throughout this file as
 * the "to end of file" sentinel for lock ranges (always compared via
 * == off_max(), so the exact value only needs to be consistent).
 *
 * Fix: the previous constants (0x7FFFFFF and 0x7FFFFFFFFFFFFFFULL) were
 * each one hex digit short — 2^27-1 and 2^59-1 rather than the true
 * signed maxima 2^31-1 and 2^63-1. */
static off_t off_max(void)
{
	BUILD_ASSERT(sizeof(off_t) == 4 || sizeof(off_t) == 8);
	if (sizeof(off_t) == 4)
		return (off_t)0x7FFFFFFF;
	else
		return (off_t)0x7FFFFFFFFFFFFFFFULL;
}
/* (Re)acquire every recorded lock in this process.  If another process in
 * the tree owns them, ask (via the parent) for them to be released first.
 * No-op when we already own them. */
326 static void get_locks(void)
331 	if (lock_owner == getpid())
334 	if (lock_owner != 0) {
335 		enum info_type type = RELEASE_LOCKS;
336 		assert(control_fd != -1);
337 		write_all(control_fd, &type, sizeof(type));
340 	fl.l_whence = SEEK_SET;
342 	for (i = 0; i < lock_num; i++) {
343 		fl.l_type = locks[i].type;
344 		fl.l_start = locks[i].start;
/* off_max() is the "to end of file" sentinel; l_len 0 means the same. */
345 		if (locks[i].end == off_max())
348 			fl.l_len = locks[i].end - locks[i].start + 1;
/* F_SETLKW: we block until the lock is ours again. */
350 		if (fcntl(locks[i].fd, F_SETLKW, &fl) != 0)
353 	lock_owner = getpid();
/* (Fragment of struct saved_file: singly-linked list of snapshots.) */
357 	struct saved_file *next;
/* Snapshot an open file's full contents plus current offset, prepending the
 * new node to 'next'.  File position is restored before returning. */
363 static struct saved_file *save_file(struct saved_file *next, int fd)
365 	struct saved_file *s = malloc(sizeof(*s));
369 	s->off = lseek(fd, 0, SEEK_CUR);
370 	/* Special file? Erk... */
371 	assert(s->off != -1);
/* Seek to end to learn the length, then rewind and slurp everything. */
372 	s->len = lseek(fd, 0, SEEK_END);
373 	lseek(fd, 0, SEEK_SET);
374 	s->contents = malloc(s->len);
375 	if (read(fd, s->contents, s->len) != s->len)
376 		err(1, "Failed to save %zu bytes", (size_t)s->len);
377 	lseek(fd, s->off, SEEK_SET);
381 /* We have little choice but to save and restore open files: mmap means we
382  * can really intercept changes in the child.
384  * We could do non-mmap'ed files on demand, however. */
/* Snapshot every live, writable fd recorded by failtest_open, so the parent
 * can undo whatever the failing child writes.  Returns the snapshot list. */
385 static struct saved_file *save_files(void)
387 	struct saved_file *files = NULL;
388 	struct failtest_call *i;
390 	/* Figure out the set of live fds. */
391 	tlist_for_each_rev(&history, i, list) {
392 		if (i->type == FAILTEST_OPEN) {
393 			int fd = i->u.open.ret;
394 			/* Only do successful, writable fds. */
398 			/* If it was closed, cleanup == NULL. */
402 			if ((i->u.open.flags & O_RDWR) == O_RDWR) {
403 				files = save_file(files, fd);
404 			} else if ((i->u.open.flags & O_WRONLY)
406 				/* FIXME: Handle O_WRONLY. Open with O_RDWR? */
/* Write each snapshot back: rewrite contents, truncate to the saved length,
 * and restore the saved file offset.  Walks (and presumably frees) the list. */
415 static void restore_files(struct saved_file *s)
418 		struct saved_file *next = s->next;
420 		lseek(s->fd, 0, SEEK_SET);
421 		if (write(s->fd, s->contents, s->len) != s->len)
422 			err(1, "Failed to restore %zu bytes", (size_t)s->len);
/* Truncate in case the child grew the file past the saved length. */
423 		if (ftruncate(s->fd, s->len) != 0)
424 			err(1, "Failed to trim file to length %zu",
427 		lseek(s->fd, s->off, SEEK_SET);
/* Free a snapshot list without writing anything back. */
433 static void free_files(struct saved_file *s)
436 		struct saved_file *next = s->next;
/* Unlink one history entry and free everything it owns. */
443 static void free_call(struct failtest_call *call)
445 	/* We don't do this in cleanup: needed even for failed opens. */
446 	if (call->type == FAILTEST_OPEN)
447 		free((char *)call->u.open.pathname);
448 	free(call->backtrace);
449 	tlist_del_from(&history, call, list);
453 /* Free up memory, so valgrind doesn't report leaks. */
454 static void free_everything(void)
456 	struct failtest_call *i;
/* Pop entries until the history list is empty, then drop the dedup table. */
458 	while ((i = tlist_top(&history, struct failtest_call, list)) != NULL)
461 	failtable_clear(&failtable);
/* Final teardown: run per-call cleanups in reverse order, report leaks (only
 * on a normal, non-forced exit), tell the parent SUCCESS/FAILURE, and exit.
 * forced_cleanup is true when we are aborting mid-test (e.g. in a child). */
464 static NORETURN void failtest_cleanup(bool forced_cleanup, int status)
466 	struct failtest_call *i;
468 	/* For children, we don't care if they "failed" the testing. */
469 	if (control_fd != -1)
472 	if (forced_cleanup) {
473 		/* We didn't actually do final operation: remove it. */
474 		i = tlist_tail(&history, struct failtest_call, list);
478 	/* Cleanup everything, in reverse order. */
479 	tlist_for_each_rev(&history, i, list) {
/* An entry with a live cleanup at exit means the test leaked a resource. */
482 		if (!forced_cleanup) {
483 			printf("Leak at %s:%u: --failpath=%s\n",
484 			       i->file, i->line, failpath_string());
492 		tell_parent(SUCCESS);
494 		tell_parent(FAILURE);
/* The heart of failtest: decide whether this recorded call should fail.
 *
 * In the normal (non-failpath) case it forks: the child returns true (and
 * will fail the call), while the parent watches the child's output and
 * control pipe, replays its writes, restores files afterwards, and returns
 * false so execution continues without the failure.  Sets call->fail.
 * NOTE(review): many lines are elided in this view; control-flow comments
 * below describe the visible structure only. */
498 static bool should_fail(struct failtest_call *call)
501 	int control[2], output[2];
502 	enum info_type type = UNEXPECTED;
505 	struct saved_file *files;
506 	struct failtest_call *dup;
/* Suppressed (unrecorded) calls never fail. */
508 	if (call == &unrecorded_call)
512 	/* + means continue after end, like normal. */
513 	if (*failpath == '+')
515 	else if (*failpath == '\0') {
516 		/* Continue, but don't inject errors. */
517 		return call->fail = false;
/* Replaying an explicit --failpath: each char must match the call type;
 * uppercase means "fail this one". */
519 		if (tolower((unsigned char)*failpath)
520 		    != info_to_arg[call->type])
521 			errx(1, "Failpath expected '%s' got '%c'\n",
522 			     failpath, info_to_arg[call->type]);
523 		call->fail = cisupper(*(failpath++));
528 	/* Attach debugger if they asked for it. */
532 		/* Pretend this last call matches whatever path wanted:
533 		 * keeps valgrind happy. */
534 		call->fail = cisupper(debugpath[strlen(debugpath)-1]);
535 		path = failpath_string();
537 		if (streq(path, debugpath)) {
541 			signal(SIGUSR1, SIG_IGN);
542 			sprintf(str, "xterm -e gdb /proc/%d/exe %d &",
544 			if (system(str) == 0)
547 			/* Ignore last character: could be upper or lower. */
548 			path[strlen(path)-1] = '\0';
549 			if (!strstarts(debugpath, path)) {
551 				     "--debugpath not followed: %s\n", path);
558 	/* Are we probing? If so, we never fail twice. */
560 		return call->fail = false;
562 	/* Don't more than once in the same place. */
563 	dup = failtable_get(&failtable, call);
565 		return call->fail = false;
/* Give the user hook a chance to veto/force this failure. */
568 	switch (failtest_hook(&history)) {
582 	/* Add it to our table of calls. */
583 	failtable_add(&failtable, call);
/* Snapshot writable files so the parent can undo the child's damage. */
585 	files = save_files();
587 	/* We're going to fail in the child. */
589 	if (pipe(control) != 0 || pipe(output) != 0)
590 		err(1, "opening pipe");
592 	/* Prevent double-printing (in child and parent) */
596 		err(1, "forking failed");
/* --- child branch: set up tracing, redirect output, and fail the call. */
603 		struct failtest_call *c;
605 		c = tlist_tail(&history, struct failtest_call, list);
606 		diff = time_sub(time_now(), start);
607 		failpath = failpath_string();
608 		trace("%u->%u (%u.%02u): %s (", getppid(), getpid(),
609 		      (int)diff.tv_sec, (int)diff.tv_usec / 10000,
612 		p = strrchr(c->file, '/');
616 			trace("%s", c->file);
617 		trace(":%u)\n", c->line);
/* Child's stdout/stderr both go into the output pipe for the parent. */
621 		dup2(output[1], STDOUT_FILENO);
622 		dup2(output[1], STDERR_FILENO);
623 		if (output[1] != STDOUT_FILENO && output[1] != STDERR_FILENO)
625 		control_fd = move_fd_to_high(control[1]);
626 		/* Valgrind spots the leak if we don't free these. */
631 		signal(SIGUSR1, hand_down);
636 	/* We grab output so we can display it; we grab writes so we
/* --- parent branch: poll the child's output and control pipes. */
639 		struct pollfd pfd[2];
642 		pfd[0].fd = output[0];
643 		pfd[0].events = POLLIN|POLLHUP;
644 		pfd[1].fd = control[0];
645 		pfd[1].events = POLLIN|POLLHUP;
648 			ret = poll(pfd, 1, failtest_timeout_ms);
650 			ret = poll(pfd, 2, failtest_timeout_ms);
657 			err(1, "Poll returned %i", ret);
/* Accumulate the child's combined stdout/stderr in 'out'. */
660 		if (pfd[0].revents & POLLIN) {
663 			out = realloc(out, outlen + 8192);
664 			len = read(output[0], out + outlen, 8192);
666 		} else if (type != SUCCESS && (pfd[1].revents & POLLIN)) {
667 			if (read_all(control[0], &type, sizeof(type))) {
669 				if (!read_write_info(control[0]))
671 				} else if (type == RELEASE_LOCKS) {
673 					/* FIXME: Tell them we're done... */
676 		} else if (pfd[0].revents & POLLHUP) {
679 	} while (type != FAILURE);
/* Reap the child and diagnose how it died. */
683 	waitpid(child, &status, 0);
684 	if (!WIFEXITED(status)) {
685 		if (WTERMSIG(status) == SIGUSR1)
686 			child_fail(out, outlen, "Timed out");
688 			child_fail(out, outlen, "Killed by signal %u: ",
691 	/* Child printed failure already, just pass up exit code. */
692 	if (type == FAILURE) {
693 		fprintf(stderr, "%.*s", (int)outlen, out);
695 		exit(WEXITSTATUS(status) ? WEXITSTATUS(status) : 1);
697 	if (WEXITSTATUS(status) != 0)
698 		child_fail(out, outlen, "Exited with status %i: ",
699 			   WEXITSTATUS(status));
702 	signal(SIGUSR1, SIG_DFL);
/* Undo whatever the child wrote to shared files. */
704 	restore_files(files);
706 	/* Only child does probe. */
709 	/* We continue onwards without failing. */
/* Cleanup for a recorded calloc: frees the allocation (body elided here). */
714 static void cleanup_calloc(struct calloc_call *call)
/* calloc() wrapper: records the call, returns NULL when told to fail,
 * otherwise performs the real calloc and registers its cleanup. */
719 void *failtest_calloc(size_t nmemb, size_t size,
720 		      const char *file, unsigned line)
722 	struct failtest_call *p;
723 	struct calloc_call call;
726 	p = add_history(FAILTEST_CALLOC, file, line, &call);
728 	if (should_fail(p)) {
729 		p->u.calloc.ret = NULL;
732 		p->u.calloc.ret = calloc(nmemb, size);
733 		set_cleanup(p, cleanup_calloc, struct calloc_call);
736 	return p->u.calloc.ret;
/* Cleanup for a recorded malloc (body elided here). */
739 static void cleanup_malloc(struct malloc_call *call)
/* malloc() wrapper: same pattern as failtest_calloc above. */
744 void *failtest_malloc(size_t size, const char *file, unsigned line)
746 	struct failtest_call *p;
747 	struct malloc_call call;
750 	p = add_history(FAILTEST_MALLOC, file, line, &call);
751 	if (should_fail(p)) {
752 		p->u.malloc.ret = NULL;
755 		p->u.malloc.ret = malloc(size);
756 		set_cleanup(p, cleanup_malloc, struct malloc_call);
759 	return p->u.malloc.ret;
/* Cleanup for a recorded realloc (body elided here). */
762 static void cleanup_realloc(struct realloc_call *call)
767 /* Walk back and find out if we got this ptr from a previous routine. */
/* Finds the most recent alloc call that produced 'ptr' so its cleanup can
 * be disarmed (the pointer is being consumed by realloc/free). */
768 static void fixup_ptr_history(void *ptr)
770 	struct failtest_call *i;
772 	/* Start at end of history, work back. */
773 	tlist_for_each_rev(&history, i, list) {
775 		case FAILTEST_REALLOC:
776 			if (i->u.realloc.ret == ptr) {
781 		case FAILTEST_MALLOC:
782 			if (i->u.malloc.ret == ptr) {
787 		case FAILTEST_CALLOC:
788 			if (i->u.calloc.ret == ptr) {
/* realloc() wrapper: records, optionally fails with NULL, otherwise
 * disarms the old pointer's cleanup before the real realloc moves it. */
799 void *failtest_realloc(void *ptr, size_t size, const char *file, unsigned line)
801 	struct failtest_call *p;
802 	struct realloc_call call;
804 	p = add_history(FAILTEST_REALLOC, file, line, &call);
806 	/* FIXME: Try one child moving allocation, one not. */
807 	if (should_fail(p)) {
808 		p->u.realloc.ret = NULL;
811 		/* Don't catch this one in the history fixup... */
812 		p->u.realloc.ret = NULL;
813 		fixup_ptr_history(ptr);
814 		p->u.realloc.ret = realloc(ptr, size);
815 		set_cleanup(p, cleanup_realloc, struct realloc_call);
818 	return p->u.realloc.ret;
/* free() wrapper: never fails, just disarms the allocation's cleanup. */
821 void failtest_free(void *ptr)
823 	fixup_ptr_history(ptr);
/* Cleanup for a recorded open: closes the fd (body elided here). */
827 static void cleanup_open(struct open_call *call)
/* open() wrapper.  Variadic like the real open(): mode is only read when
 * O_CREAT is set.  NOTE(review): when O_CREAT is absent, call.mode looks
 * uninitialized before the open() below — harmless since open ignores it
 * then, but confirm against the full source. */
832 int failtest_open(const char *pathname,
833 		  const char *file, unsigned line, ...)
835 	struct failtest_call *p;
836 	struct open_call call;
839 	call.pathname = strdup(pathname);
841 	call.flags = va_arg(ap, int);
842 	if (call.flags & O_CREAT) {
843 		call.mode = va_arg(ap, int);
846 	p = add_history(FAILTEST_OPEN, file, line, &call);
847 	/* Avoid memory leak! */
848 	if (p == &unrecorded_call)
849 		free((char *)call.pathname);
/* Do the real open first: a genuine failure is never turned into success. */
850 	p->u.open.ret = open(pathname, call.flags, call.mode);
852 	if (p->u.open.ret == -1) {
855 	} else if (should_fail(p)) {
/* Injected failure: close the fd we actually got and report failure. */
856 		close(p->u.open.ret);
858 		/* FIXME: Play with error codes? */
861 		set_cleanup(p, cleanup_open, struct open_call);
864 	return p->u.open.ret;
/* mmap() wrapper: records the call; failure is reported as MAP_FAILED. */
867 void *failtest_mmap(void *addr, size_t length, int prot, int flags,
868 		    int fd, off_t offset, const char *file, unsigned line)
870 	struct failtest_call *p;
871 	struct mmap_call call;
874 	call.length = length;
877 	call.offset = offset;
880 	p = add_history(FAILTEST_MMAP, file, line, &call);
881 	if (should_fail(p)) {
882 		p->u.mmap.ret = MAP_FAILED;
885 		p->u.mmap.ret = mmap(addr, length, prot, flags, fd, offset);
888 	return p->u.mmap.ret;
/* Cleanup for a recorded pipe: close whichever ends are still open. */
891 static void cleanup_pipe(struct pipe_call *call)
893 	if (!call->closed[0])
895 	if (!call->closed[1])
/* pipe() wrapper: tracks both fds so failtest_close can mark each end. */
899 int failtest_pipe(int pipefd[2], const char *file, unsigned line)
901 	struct failtest_call *p;
902 	struct pipe_call call;
904 	p = add_history(FAILTEST_PIPE, file, line, &call);
905 	if (should_fail(p)) {
907 		/* FIXME: Play with error codes? */
910 		p->u.pipe.ret = pipe(p->u.pipe.fds);
911 		p->u.pipe.closed[0] = p->u.pipe.closed[1] = false;
912 		set_cleanup(p, cleanup_pipe, struct pipe_call);
914 	/* This causes valgrind to notice if they use pipefd[] after failure */
915 	memcpy(pipefd, p->u.pipe.fds, sizeof(p->u.pipe.fds));
917 	return p->u.pipe.ret;
/* pread() wrapper: records the call, fails with -1 (error path elided) or
 * performs the real pread. */
920 ssize_t failtest_pread(int fd, void *buf, size_t count, off_t off,
921 		       const char *file, unsigned line)
923 	struct failtest_call *p;
924 	struct read_call call;
929 	p = add_history(FAILTEST_READ, file, line, &call);
931 	/* FIXME: Try partial read returns. */
932 	if (should_fail(p)) {
936 		p->u.read.ret = pread(fd, buf, count, off);
939 	return p->u.read.ret;
/* pwrite() wrapper.  off == (off_t)-1 marks a non-seekable target (pipe or
 * socket): such writes are reported up the control pipe, and the parent
 * cross-checks its own writes against the child's recorded ones so both
 * sides of the fork stay byte-identical on shared streams. */
942 ssize_t failtest_pwrite(int fd, const void *buf, size_t count, off_t off,
943 			const char *file, unsigned line)
945 	struct failtest_call *p;
946 	struct write_call call;
952 	p = add_history(FAILTEST_WRITE, file, line, &call);
954 	/* If we're a child, we need to make sure we write the same thing
955 	 * to non-files as the parent does, so tell it. */
956 	if (control_fd != -1 && off == (off_t)-1) {
957 		enum info_type type = WRITE;
959 		write_all(control_fd, &type, sizeof(type));
960 		write_all(control_fd, &p->u.write, sizeof(p->u.write));
961 		write_all(control_fd, buf, count);
964 	/* FIXME: Try partial write returns. */
965 	if (should_fail(p)) {
969 	/* FIXME: We assume same write order in parent and child */
/* Parent path: the head of child_writes[] must match this write exactly. */
970 	if (off == (off_t)-1 && child_writes_num != 0) {
971 		if (child_writes[0].fd != fd)
972 			errx(1, "Child wrote to fd %u, not %u?",
973 			     child_writes[0].fd, fd);
974 		if (child_writes[0].off != p->u.write.off)
975 			errx(1, "Child wrote to offset %zu, not %zu?",
976 			     (size_t)child_writes[0].off,
977 			     (size_t)p->u.write.off);
978 		if (child_writes[0].count != count)
979 			errx(1, "Child wrote length %zu, not %zu?",
980 			     child_writes[0].count, count);
981 		if (memcmp(child_writes[0].buf, buf, count)) {
983 			     "Child wrote differently to"
984 			     " fd %u than we did!\n", fd);
/* Pop the matched entry.  NOTE(review): the decrement of child_writes_num
 * is in a line not shown here — the memmove count relies on it. */
986 		free((char *)child_writes[0].buf);
988 		memmove(&child_writes[0], &child_writes[1],
989 			sizeof(child_writes[0]) * child_writes_num);
991 		/* Is this is a socket or pipe, child wrote it
993 		if (p->u.write.off == (off_t)-1) {
994 			p->u.write.ret = count;
996 			return p->u.write.ret;
999 	p->u.write.ret = pwrite(fd, buf, count, off);
1002 	return p->u.write.ret;
/* read() wrapper: delegates to failtest_pread at the current file offset. */
1005 ssize_t failtest_read(int fd, void *buf, size_t count,
1006 		      const char *file, unsigned line)
1008 	return failtest_pread(fd, buf, count, lseek(fd, 0, SEEK_CUR),
/* write() wrapper: delegates to failtest_pwrite at the current offset.
 * NOTE(review): lseek on a pipe/socket returns -1, which is exactly the
 * "non-seekable" sentinel failtest_pwrite expects. */
1012 ssize_t failtest_write(int fd, const void *buf, size_t count,
1013 		       const char *file, unsigned line)
1015 	return failtest_pwrite(fd, buf, count, lseek(fd, 0, SEEK_CUR),
/* Merge a lock (or unlock: type == F_UNLCK) for [start, end] on fd into the
 * locks[] array, splitting/trimming/removing overlapping entries so the
 * array stays a set of disjoint ranges.  Returns the (possibly realloc'd)
 * array; lock_num is updated as a side effect. */
1019 static struct lock_info *WARN_UNUSED_RESULT
1020 add_lock(struct lock_info *locks, int fd, off_t start, off_t end, int type)
1023 	struct lock_info *l;
1025 	for (i = 0; i < lock_num; i++) {
1030 		/* Four cases we care about:
1044 		if (start > l->start && end < l->end) {
1045 			/* Mid overlap: trim entry, add new one. */
1046 			off_t new_start, new_end;
1047 			new_start = end + 1;
/* Recursive call re-adds the tail piece of the split entry. */
1050 			locks = add_lock(locks,
1051 					 fd, new_start, new_end, l->type);
1053 		} else if (start <= l->start && end >= l->end) {
1054 			/* Total overlap: eliminate entry. */
1057 		} else if (end >= l->start && end < l->end) {
1058 			/* Start overlap: trim entry. */
1060 		} else if (start > l->start && start <= l->end) {
1061 			/* End overlap: trim entry. */
1064 		/* Nothing left? Remove it. */
1065 		if (l->end < l->start) {
1066 			memmove(l, l + 1, (--lock_num - i) * sizeof(l[0]));
/* Unlocks only carve holes; an actual lock appends a fresh entry. */
1071 	if (type != F_UNLCK) {
1072 		locks = realloc(locks, (lock_num + 1) * sizeof(*locks));
1073 		l = &locks[lock_num++];
1082 /* We trap this so we can record it: we don't fail it. */
/* close() wrapper: records the close, then walks history to mark the fd's
 * origin (pipe end or open) as closed so its cleanup is disarmed.  Also
 * drops any locks held on the fd (close releases fcntl locks). */
1083 int failtest_close(int fd, const char *file, unsigned line)
1085 	struct failtest_call *i;
1086 	struct close_call call;
1087 	struct failtest_call *p;
1090 	p = add_history(FAILTEST_CLOSE, file, line, &call);
1093 	/* Consume close from failpath. */
1101 	/* Trace history to find source of fd. */
1102 	tlist_for_each_rev(&history, i, list) {
1106 			if (i->u.pipe.fds[0] == fd) {
1107 				assert(!i->u.pipe.closed[0]);
1108 				i->u.pipe.closed[0] = true;
/* Pipe cleanup is only disarmed once both ends are closed. */
1109 				if (i->u.pipe.closed[1])
1113 			if (i->u.pipe.fds[1] == fd) {
1114 				assert(!i->u.pipe.closed[1]);
1115 				i->u.pipe.closed[1] = true;
1116 				if (i->u.pipe.closed[0])
1122 			if (i->u.open.ret == fd) {
1123 				assert((void *)i->cleanup
1124 				       == (void *)cleanup_open);
/* POSIX: closing an fd drops all this process's locks on the file. */
1135 	locks = add_lock(locks, fd, 0, off_max(), F_UNLCK);
1139 /* Zero length means "to end of file" */
/* Convert an fcntl (start, len) pair to an inclusive end offset; the
 * len == 0 branch (returning off_max()) is in lines not shown here. */
1140 static off_t end_of(off_t start, off_t len)
1144 		return start + len - 1;
1147 /* FIXME: This only handles locks, really. */
/* fcntl() wrapper.  Non-lock commands (F_SETFL/F_SETFD/F_GETFL/etc., per
 * the elided case labels) pass straight through; F_SETLK/F_SETLKW are
 * recorded, may be failed, and on success update the locks[] bookkeeping. */
1148 int failtest_fcntl(int fd, const char *file, unsigned line, int cmd, ...)
1150 	struct failtest_call *p;
1151 	struct fcntl_call call;
1157 	/* Argument extraction. */
1162 		call.arg.l = va_arg(ap, long);
1164 		return fcntl(fd, cmd, call.arg.l);
1167 		return fcntl(fd, cmd);
1171 		call.arg.fl = *va_arg(ap, struct flock *);
1173 		return fcntl(fd, cmd, &call.arg.fl);
/* Lock commands: copy the struct flock by value for the history record. */
1177 		call.arg.fl = *va_arg(ap, struct flock *);
1181 		/* This means you need to implement it here. */
1182 		err(1, "failtest: unknown fcntl %u", cmd);
1185 	p = add_history(FAILTEST_FCNTL, file, line, &call);
1187 	if (should_fail(p)) {
1188 		p->u.fcntl.ret = -1;
/* F_SETLK failure is EAGAIN-style; (errno choice is in elided lines). */
1189 		if (p->u.fcntl.cmd == F_SETLK)
1195 		p->u.fcntl.ret = fcntl(p->u.fcntl.fd, p->u.fcntl.cmd,
1196 				       &p->u.fcntl.arg.fl);
1197 		if (p->u.fcntl.ret == -1)
1200 		/* We don't handle anything else yet. */
1201 		assert(p->u.fcntl.arg.fl.l_whence == SEEK_SET);
/* Record the successfully acquired (or released) range. */
1202 		locks = add_lock(locks,
1204 				 p->u.fcntl.arg.fl.l_start,
1205 				 end_of(p->u.fcntl.arg.fl.l_start,
1206 				 	p->u.fcntl.arg.fl.l_len),
1207 				 p->u.fcntl.arg.fl.l_type);
1211 	return p->u.fcntl.ret;
/* getpid() wrapper (return path elided; presumably reports orig_pid so the
 * test sees a stable pid across forks — confirm in full source). */
1214 pid_t failtest_getpid(const char *file, unsigned line)
1216 	/* You must call failtest_init first! */
/* One-time setup: record the root pid, move the warning fd out of the way,
 * parse --failpath= / --tracepath / --debugpath= options, and initialize
 * the call-site dedup table. */
1221 void failtest_init(int argc, char *argv[])
1225 	orig_pid = getpid();
1227 	warnfd = move_fd_to_high(dup(STDERR_FILENO));
1228 	for (i = 1; i < argc; i++) {
1229 		if (!strncmp(argv[i], "--failpath=", strlen("--failpath="))) {
1230 			failpath = argv[i] + strlen("--failpath=");
1231 		} else if (strcmp(argv[i], "--tracepath") == 0) {
/* Tracing disables the watchdog: -1 wraps to "no timeout" for poll(). */
1233 			failtest_timeout_ms = -1;
1234 		} else if (!strncmp(argv[i], "--debugpath=",
1235 				    strlen("--debugpath="))) {
1236 			debugpath = argv[i] + strlen("--debugpath=");
1239 	failtable_init(&failtable);
1243 bool failtest_has_failed(void)
1245 return control_fd != -1;
/* exit() replacement for failtest programs: runs the user's exit check (a
 * false return is reported as a child failure), then does the full cleanup
 * pass (leak reporting, parent notification) and exits with 'status'. */
1248 void failtest_exit(int status)
1250 	if (failtest_exit_check) {
1251 		if (!failtest_exit_check(&history))
1252 			child_fail(NULL, 0, "failtest_exit_check failed\n");
1255 	failtest_cleanup(false, status);