1 /* Licensed under LGPL - see LICENSE file for details */
2 #include <ccan/failtest/failtest.h>
12 #include <sys/types.h>
19 #include <ccan/time/time.h>
20 #include <ccan/read_write_all/read_write_all.h>
21 #include <ccan/failtest/failtest_proto.h>
22 #include <ccan/build_assert/build_assert.h>
23 #include <ccan/hash/hash.h>
24 #include <ccan/htable/htable_type.h>
25 #include <ccan/str/str.h>
27 enum failtest_result (*failtest_hook)(struct tlist_calls *);
29 static int tracefd = -1;
32 unsigned int failtest_timeout_ms = 20000;
35 const char *debugpath;
47 /* end is inclusive: you can't have a 0-byte lock. */
52 /* We hash the call location together with its backtrace. */
53 static size_t hash_call(const struct failtest_call *call)
55 return hash(call->file, strlen(call->file),
57 hash(call->backtrace, call->backtrace_num,
61 static bool call_eq(const struct failtest_call *call1,
62 const struct failtest_call *call2)
66 if (strcmp(call1->file, call2->file) != 0
67 || call1->line != call2->line
68 || call1->type != call2->type
69 || call1->backtrace_num != call2->backtrace_num)
72 for (i = 0; i < call1->backtrace_num; i++)
73 if (call1->backtrace[i] != call2->backtrace[i])
79 /* Defines struct failtable. */
80 HTABLE_DEFINE_TYPE(struct failtest_call, (struct failtest_call *), hash_call,
83 bool (*failtest_exit_check)(struct tlist_calls *history);
85 static struct tlist_calls history = TLIST_INIT(history);
86 static int control_fd = -1;
87 static struct timeval start;
88 static bool probing = false;
89 static struct failtable failtable;
91 static struct write_call *child_writes = NULL;
92 static unsigned int child_writes_num = 0;
94 static pid_t lock_owner;
95 static struct lock_info *locks = NULL;
96 static unsigned int lock_num = 0;
98 static pid_t orig_pid;
100 static const char info_to_arg[] = "mceoxprwfa";
102 /* Dummy call used for failtest_undo wrappers. */
103 static struct failtest_call unrecorded_call;
106 #include <execinfo.h>
108 static void **get_backtrace(unsigned int *num)
110 static unsigned int max_back = 100;
114 ret = malloc(max_back * sizeof(void *));
115 *num = backtrace(ret, max_back);
116 if (*num == max_back) {
124 /* This will test slightly less, since will consider all of the same
125 * calls as identical. But, it's slightly faster! */
126 static void **get_backtrace(unsigned int *num)
131 #endif /* HAVE_BACKTRACE */
133 static struct failtest_call *add_history_(enum failtest_call_type type,
139 struct failtest_call *call;
141 /* NULL file is how we suppress failure. */
143 return &unrecorded_call;
145 call = malloc(sizeof *call);
149 call->cleanup = NULL;
150 call->backtrace = get_backtrace(&call->backtrace_num);
151 memcpy(&call->u, elem, elem_size);
152 tlist_add_tail(&history, call, list);
156 #define add_history(type, file, line, elem) \
157 add_history_((type), (file), (line), (elem), sizeof(*(elem)))
159 /* We do a fake call inside a sizeof(), to check types. */
160 #define set_cleanup(call, clean, type) \
161 (call)->cleanup = (void *)((void)sizeof(clean((type *)NULL),1), (clean))
/* Dup the fd to a high value (out of the way I hope!), and close the old fd. */
static int move_fd_to_high(int fd)
{
	int candidate;

	/* Scan down from the top of the select() range for an unused slot. */
	for (candidate = FD_SETSIZE - 1; candidate >= 0; candidate--) {
		bool unused = (fcntl(candidate, F_GETFL) == -1
			       && errno == EBADF);
		if (!unused)
			continue;
		if (dup2(fd, candidate) == -1)
			err(1, "Failed to dup fd %i to %i", fd, candidate);
		close(fd);
		return candidate;
	}
	/* Nothing?  Really?  Er... ok? */
	return fd;
}
181 static bool read_write_info(int fd)
183 struct write_call *w;
186 /* We don't need all of this, but it's simple. */
187 child_writes = realloc(child_writes,
188 (child_writes_num+1) * sizeof(child_writes[0]));
189 w = &child_writes[child_writes_num];
190 if (!read_all(fd, w, sizeof(*w)))
193 w->buf = buf = malloc(w->count);
194 if (!read_all(fd, buf, w->count))
201 static char *failpath_string(void)
203 struct failtest_call *i;
204 char *ret = strdup("");
207 /* Inefficient, but who cares? */
208 tlist_for_each(&history, i, list) {
209 ret = realloc(ret, len + 2);
210 ret[len] = info_to_arg[i->type];
212 ret[len] = toupper(ret[len]);
218 static void warn_via_fd(int e, const char *fmt, va_list ap)
220 char *p = failpath_string();
222 vdprintf(warnfd, fmt, ap);
224 dprintf(warnfd, ": %s", strerror(e));
225 dprintf(warnfd, " [%s]\n", p);
/* warn()-alike that also prints the current failpath. */
static void fwarn(const char *fmt, ...)
{
	va_list args;
	/* Capture errno before any call below can clobber it. */
	int saved_errno = errno;

	va_start(args, fmt);
	warn_via_fd(saved_errno, fmt, args);
	va_end(args);
}
/* warnx()-alike (no errno) that also prints the current failpath. */
static void fwarnx(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	warn_via_fd(-1, fmt, args);
	va_end(args);
}
249 static void tell_parent(enum info_type type)
251 if (control_fd != -1)
252 write_all(control_fd, &type, sizeof(type));
255 static void child_fail(const char *out, size_t outlen, const char *fmt, ...)
258 char *path = failpath_string();
261 vfprintf(stderr, fmt, ap);
264 fprintf(stderr, "%.*s", (int)outlen, out);
265 printf("To reproduce: --failpath=%s\n", path);
267 tell_parent(FAILURE);
271 static void trace(const char *fmt, ...)
279 vdprintf(tracefd, fmt, ap);
285 static void hand_down(int signum)
290 static void release_locks(void)
292 /* Locks were never acquired/reacquired? */
296 /* We own them? Release them all. */
297 if (lock_owner == getpid()) {
301 fl.l_whence = SEEK_SET;
305 for (i = 0; i < lock_num; i++)
306 fcntl(locks[i].fd, F_SETLK, &fl);
308 /* Our parent must have them; pass request up. */
309 enum info_type type = RELEASE_LOCKS;
310 assert(control_fd != -1);
311 write_all(control_fd, &type, sizeof(type));
/* off_t is a signed type.  Getting its max is non-trivial. */
static off_t off_max(void)
{
	BUILD_ASSERT(sizeof(off_t) == 4 || sizeof(off_t) == 8);
	/* The previous constants each dropped a hex digit (2^27-1 and
	 * 2^59-1); these are the true maxima of 32- and 64-bit off_t.
	 * The value is used consistently as the "to end of file" lock
	 * sentinel (via end_of/add_lock), so fixing it here is safe. */
	if (sizeof(off_t) == 4)
		return (off_t)0x7FFFFFFF;
	else
		return (off_t)0x7FFFFFFFFFFFFFFFULL;
}
326 static void get_locks(void)
331 if (lock_owner == getpid())
334 if (lock_owner != 0) {
335 enum info_type type = RELEASE_LOCKS;
336 assert(control_fd != -1);
337 write_all(control_fd, &type, sizeof(type));
340 fl.l_whence = SEEK_SET;
342 for (i = 0; i < lock_num; i++) {
343 fl.l_type = locks[i].type;
344 fl.l_start = locks[i].start;
345 if (locks[i].end == off_max())
348 fl.l_len = locks[i].end - locks[i].start + 1;
350 if (fcntl(locks[i].fd, F_SETLKW, &fl) != 0)
353 lock_owner = getpid();
357 struct saved_file *next;
363 static struct saved_file *save_file(struct saved_file *next, int fd)
365 struct saved_file *s = malloc(sizeof(*s));
369 s->off = lseek(fd, 0, SEEK_CUR);
370 /* Special file? Erk... */
371 assert(s->off != -1);
372 s->len = lseek(fd, 0, SEEK_END);
373 lseek(fd, 0, SEEK_SET);
374 s->contents = malloc(s->len);
375 if (read(fd, s->contents, s->len) != s->len)
376 err(1, "Failed to save %zu bytes", (size_t)s->len);
377 lseek(fd, s->off, SEEK_SET);
381 /* We have little choice but to save and restore open files: mmap means we
382 * can really intercept changes in the child.
384 * We could do non-mmap'ed files on demand, however. */
385 static struct saved_file *save_files(void)
387 struct saved_file *files = NULL;
388 struct failtest_call *i;
390 /* Figure out the set of live fds. */
391 tlist_for_each_rev(&history, i, list) {
392 if (i->type == FAILTEST_OPEN) {
393 int fd = i->u.open.ret;
394 /* Only do successful, writable fds. */
398 /* If it was closed, cleanup == NULL. */
402 if ((i->u.open.flags & O_RDWR) == O_RDWR) {
403 files = save_file(files, fd);
404 } else if ((i->u.open.flags & O_WRONLY)
406 /* FIXME: Handle O_WRONLY. Open with O_RDWR? */
415 static void restore_files(struct saved_file *s)
418 struct saved_file *next = s->next;
420 lseek(s->fd, 0, SEEK_SET);
421 if (write(s->fd, s->contents, s->len) != s->len)
422 err(1, "Failed to restore %zu bytes", (size_t)s->len);
423 if (ftruncate(s->fd, s->len) != 0)
424 err(1, "Failed to trim file to length %zu",
427 lseek(s->fd, s->off, SEEK_SET);
433 static void free_files(struct saved_file *s)
436 struct saved_file *next = s->next;
443 static void free_call(struct failtest_call *call)
445 /* We don't do this in cleanup: needed even for failed opens. */
446 if (call->type == FAILTEST_OPEN)
447 free((char *)call->u.open.pathname);
448 free(call->backtrace);
449 tlist_del_from(&history, call, list);
453 /* Free up memory, so valgrind doesn't report leaks. */
454 static void free_everything(void)
456 struct failtest_call *i;
458 while ((i = tlist_top(&history, struct failtest_call, list)) != NULL)
461 failtable_clear(&failtable);
464 static NORETURN void failtest_cleanup(bool forced_cleanup, int status)
466 struct failtest_call *i;
468 /* For children, we don't care if they "failed" the testing. */
469 if (control_fd != -1)
472 if (forced_cleanup) {
473 /* We didn't actually do final operation: remove it. */
474 i = tlist_tail(&history, struct failtest_call, list);
478 /* Cleanup everything, in reverse order. */
479 tlist_for_each_rev(&history, i, list) {
482 if (!forced_cleanup) {
483 printf("Leak at %s:%u: --failpath=%s\n",
484 i->file, i->line, failpath_string());
491 tell_parent(SUCCESS);
495 static bool should_fail(struct failtest_call *call)
498 int control[2], output[2];
499 enum info_type type = UNEXPECTED;
502 struct saved_file *files;
503 struct failtest_call *dup;
505 if (call == &unrecorded_call)
509 /* + means continue after end, like normal. */
510 if (*failpath == '+')
512 else if (*failpath == '\0') {
513 /* Continue, but don't inject errors. */
514 return call->fail = false;
516 if (tolower((unsigned char)*failpath)
517 != info_to_arg[call->type])
518 errx(1, "Failpath expected '%c' got '%c'\n",
519 info_to_arg[call->type], *failpath);
520 call->fail = cisupper(*(failpath++));
525 /* Attach debugger if they asked for it. */
529 /* Pretend this last call matches whatever path wanted:
530 * keeps valgrind happy. */
531 call->fail = cisupper(debugpath[strlen(debugpath)-1]);
532 path = failpath_string();
534 if (streq(path, debugpath)) {
538 signal(SIGUSR1, SIG_IGN);
539 sprintf(str, "xterm -e gdb /proc/%d/exe %d &",
541 if (system(str) == 0)
544 /* Ignore last character: could be upper or lower. */
545 path[strlen(path)-1] = '\0';
546 if (!strstarts(debugpath, path)) {
548 "--debugpath not followed: %s\n", path);
555 /* Are we probing? If so, we never fail twice. */
557 return call->fail = false;
559 /* Don't more than once in the same place. */
560 dup = failtable_get(&failtable, call);
562 return call->fail = false;
565 switch (failtest_hook(&history)) {
579 /* Add it to our table of calls. */
580 failtable_add(&failtable, call);
582 files = save_files();
584 /* We're going to fail in the child. */
586 if (pipe(control) != 0 || pipe(output) != 0)
587 err(1, "opening pipe");
589 /* Prevent double-printing (in child and parent) */
593 err(1, "forking failed");
600 struct failtest_call *c;
602 c = tlist_tail(&history, struct failtest_call, list);
603 diff = time_sub(time_now(), start);
604 failpath = failpath_string();
605 trace("%u->%u (%u.%02u): %s (", getppid(), getpid(),
606 (int)diff.tv_sec, (int)diff.tv_usec / 10000,
609 p = strrchr(c->file, '/');
613 trace("%s", c->file);
614 trace(":%u)\n", c->line);
618 dup2(output[1], STDOUT_FILENO);
619 dup2(output[1], STDERR_FILENO);
620 if (output[1] != STDOUT_FILENO && output[1] != STDERR_FILENO)
622 control_fd = move_fd_to_high(control[1]);
623 /* Valgrind spots the leak if we don't free these. */
628 signal(SIGUSR1, hand_down);
633 /* We grab output so we can display it; we grab writes so we
636 struct pollfd pfd[2];
639 pfd[0].fd = output[0];
640 pfd[0].events = POLLIN|POLLHUP;
641 pfd[1].fd = control[0];
642 pfd[1].events = POLLIN|POLLHUP;
645 ret = poll(pfd, 1, failtest_timeout_ms);
647 ret = poll(pfd, 2, failtest_timeout_ms);
654 err(1, "Poll returned %i", ret);
657 if (pfd[0].revents & POLLIN) {
660 out = realloc(out, outlen + 8192);
661 len = read(output[0], out + outlen, 8192);
663 } else if (type != SUCCESS && (pfd[1].revents & POLLIN)) {
664 if (read_all(control[0], &type, sizeof(type))) {
666 if (!read_write_info(control[0]))
668 } else if (type == RELEASE_LOCKS) {
670 /* FIXME: Tell them we're done... */
673 } else if (pfd[0].revents & POLLHUP) {
676 } while (type != FAILURE);
680 waitpid(child, &status, 0);
681 if (!WIFEXITED(status)) {
682 if (WTERMSIG(status) == SIGUSR1)
683 child_fail(out, outlen, "Timed out");
685 child_fail(out, outlen, "Killed by signal %u: ",
688 /* Child printed failure already, just pass up exit code. */
689 if (type == FAILURE) {
690 fprintf(stderr, "%.*s", (int)outlen, out);
692 exit(WEXITSTATUS(status) ? WEXITSTATUS(status) : 1);
694 if (WEXITSTATUS(status) != 0)
695 child_fail(out, outlen, "Exited with status %i: ",
696 WEXITSTATUS(status));
699 signal(SIGUSR1, SIG_DFL);
701 restore_files(files);
703 /* Only child does probe. */
706 /* We continue onwards without failing. */
711 static void cleanup_calloc(struct calloc_call *call)
716 void *failtest_calloc(size_t nmemb, size_t size,
717 const char *file, unsigned line)
719 struct failtest_call *p;
720 struct calloc_call call;
723 p = add_history(FAILTEST_CALLOC, file, line, &call);
725 if (should_fail(p)) {
726 p->u.calloc.ret = NULL;
729 p->u.calloc.ret = calloc(nmemb, size);
730 set_cleanup(p, cleanup_calloc, struct calloc_call);
733 return p->u.calloc.ret;
736 static void cleanup_malloc(struct malloc_call *call)
741 void *failtest_malloc(size_t size, const char *file, unsigned line)
743 struct failtest_call *p;
744 struct malloc_call call;
747 p = add_history(FAILTEST_MALLOC, file, line, &call);
748 if (should_fail(p)) {
749 p->u.malloc.ret = NULL;
752 p->u.malloc.ret = malloc(size);
753 set_cleanup(p, cleanup_malloc, struct malloc_call);
756 return p->u.malloc.ret;
759 static void cleanup_realloc(struct realloc_call *call)
764 /* Walk back and find out if we got this ptr from a previous routine. */
765 static void fixup_ptr_history(void *ptr)
767 struct failtest_call *i;
769 /* Start at end of history, work back. */
770 tlist_for_each_rev(&history, i, list) {
772 case FAILTEST_REALLOC:
773 if (i->u.realloc.ret == ptr) {
778 case FAILTEST_MALLOC:
779 if (i->u.malloc.ret == ptr) {
784 case FAILTEST_CALLOC:
785 if (i->u.calloc.ret == ptr) {
796 void *failtest_realloc(void *ptr, size_t size, const char *file, unsigned line)
798 struct failtest_call *p;
799 struct realloc_call call;
801 p = add_history(FAILTEST_REALLOC, file, line, &call);
803 /* FIXME: Try one child moving allocation, one not. */
804 if (should_fail(p)) {
805 p->u.realloc.ret = NULL;
808 /* Don't catch this one in the history fixup... */
809 p->u.realloc.ret = NULL;
810 fixup_ptr_history(ptr);
811 p->u.realloc.ret = realloc(ptr, size);
812 set_cleanup(p, cleanup_realloc, struct realloc_call);
815 return p->u.realloc.ret;
818 void failtest_free(void *ptr)
820 fixup_ptr_history(ptr);
824 static void cleanup_open(struct open_call *call)
829 int failtest_open(const char *pathname,
830 const char *file, unsigned line, ...)
832 struct failtest_call *p;
833 struct open_call call;
836 call.pathname = strdup(pathname);
838 call.flags = va_arg(ap, int);
839 if (call.flags & O_CREAT) {
840 call.mode = va_arg(ap, int);
843 p = add_history(FAILTEST_OPEN, file, line, &call);
844 /* Avoid memory leak! */
845 if (p == &unrecorded_call)
846 free((char *)call.pathname);
847 p->u.open.ret = open(pathname, call.flags, call.mode);
849 if (p->u.open.ret == -1) {
852 } else if (should_fail(p)) {
853 close(p->u.open.ret);
855 /* FIXME: Play with error codes? */
858 set_cleanup(p, cleanup_open, struct open_call);
861 return p->u.open.ret;
864 void *failtest_mmap(void *addr, size_t length, int prot, int flags,
865 int fd, off_t offset, const char *file, unsigned line)
867 struct failtest_call *p;
868 struct mmap_call call;
871 call.length = length;
874 call.offset = offset;
877 p = add_history(FAILTEST_MMAP, file, line, &call);
878 if (should_fail(p)) {
879 p->u.mmap.ret = MAP_FAILED;
882 p->u.mmap.ret = mmap(addr, length, prot, flags, fd, offset);
885 return p->u.mmap.ret;
888 static void cleanup_pipe(struct pipe_call *call)
890 if (!call->closed[0])
892 if (!call->closed[1])
896 int failtest_pipe(int pipefd[2], const char *file, unsigned line)
898 struct failtest_call *p;
899 struct pipe_call call;
901 p = add_history(FAILTEST_PIPE, file, line, &call);
902 if (should_fail(p)) {
904 /* FIXME: Play with error codes? */
907 p->u.pipe.ret = pipe(p->u.pipe.fds);
908 p->u.pipe.closed[0] = p->u.pipe.closed[1] = false;
909 set_cleanup(p, cleanup_pipe, struct pipe_call);
911 /* This causes valgrind to notice if they use pipefd[] after failure */
912 memcpy(pipefd, p->u.pipe.fds, sizeof(p->u.pipe.fds));
914 return p->u.pipe.ret;
917 ssize_t failtest_pread(int fd, void *buf, size_t count, off_t off,
918 const char *file, unsigned line)
920 struct failtest_call *p;
921 struct read_call call;
926 p = add_history(FAILTEST_READ, file, line, &call);
928 /* FIXME: Try partial read returns. */
929 if (should_fail(p)) {
933 p->u.read.ret = pread(fd, buf, count, off);
936 return p->u.read.ret;
939 ssize_t failtest_pwrite(int fd, const void *buf, size_t count, off_t off,
940 const char *file, unsigned line)
942 struct failtest_call *p;
943 struct write_call call;
949 p = add_history(FAILTEST_WRITE, file, line, &call);
951 /* If we're a child, we need to make sure we write the same thing
952 * to non-files as the parent does, so tell it. */
953 if (control_fd != -1 && off == (off_t)-1) {
954 enum info_type type = WRITE;
956 write_all(control_fd, &type, sizeof(type));
957 write_all(control_fd, &p->u.write, sizeof(p->u.write));
958 write_all(control_fd, buf, count);
961 /* FIXME: Try partial write returns. */
962 if (should_fail(p)) {
966 /* FIXME: We assume same write order in parent and child */
967 if (off == (off_t)-1 && child_writes_num != 0) {
968 if (child_writes[0].fd != fd)
969 errx(1, "Child wrote to fd %u, not %u?",
970 child_writes[0].fd, fd);
971 if (child_writes[0].off != p->u.write.off)
972 errx(1, "Child wrote to offset %zu, not %zu?",
973 (size_t)child_writes[0].off,
974 (size_t)p->u.write.off);
975 if (child_writes[0].count != count)
976 errx(1, "Child wrote length %zu, not %zu?",
977 child_writes[0].count, count);
978 if (memcmp(child_writes[0].buf, buf, count)) {
980 "Child wrote differently to"
981 " fd %u than we did!\n", fd);
983 free((char *)child_writes[0].buf);
985 memmove(&child_writes[0], &child_writes[1],
986 sizeof(child_writes[0]) * child_writes_num);
988 /* Is this is a socket or pipe, child wrote it
990 if (p->u.write.off == (off_t)-1) {
991 p->u.write.ret = count;
993 return p->u.write.ret;
996 p->u.write.ret = pwrite(fd, buf, count, off);
999 return p->u.write.ret;
1002 ssize_t failtest_read(int fd, void *buf, size_t count,
1003 const char *file, unsigned line)
1005 return failtest_pread(fd, buf, count, lseek(fd, 0, SEEK_CUR),
1009 ssize_t failtest_write(int fd, const void *buf, size_t count,
1010 const char *file, unsigned line)
1012 return failtest_pwrite(fd, buf, count, lseek(fd, 0, SEEK_CUR),
1016 static struct lock_info *WARN_UNUSED_RESULT
1017 add_lock(struct lock_info *locks, int fd, off_t start, off_t end, int type)
1020 struct lock_info *l;
1022 for (i = 0; i < lock_num; i++) {
1027 /* Four cases we care about:
1041 if (start > l->start && end < l->end) {
1042 /* Mid overlap: trim entry, add new one. */
1043 off_t new_start, new_end;
1044 new_start = end + 1;
1047 locks = add_lock(locks,
1048 fd, new_start, new_end, l->type);
1050 } else if (start <= l->start && end >= l->end) {
1051 /* Total overlap: eliminate entry. */
1054 } else if (end >= l->start && end < l->end) {
1055 /* Start overlap: trim entry. */
1057 } else if (start > l->start && start <= l->end) {
1058 /* End overlap: trim entry. */
1061 /* Nothing left? Remove it. */
1062 if (l->end < l->start) {
1063 memmove(l, l + 1, (--lock_num - i) * sizeof(l[0]));
1068 if (type != F_UNLCK) {
1069 locks = realloc(locks, (lock_num + 1) * sizeof(*locks));
1070 l = &locks[lock_num++];
1079 /* We trap this so we can record it: we don't fail it. */
1080 int failtest_close(int fd, const char *file, unsigned line)
1082 struct failtest_call *i;
1083 struct close_call call;
1084 struct failtest_call *p;
1087 p = add_history(FAILTEST_CLOSE, file, line, &call);
1090 /* Consume close from failpath. */
1098 /* Trace history to find source of fd. */
1099 tlist_for_each_rev(&history, i, list) {
1103 if (i->u.pipe.fds[0] == fd) {
1104 assert(!i->u.pipe.closed[0]);
1105 i->u.pipe.closed[0] = true;
1106 if (i->u.pipe.closed[1])
1110 if (i->u.pipe.fds[1] == fd) {
1111 assert(!i->u.pipe.closed[1]);
1112 i->u.pipe.closed[1] = true;
1113 if (i->u.pipe.closed[0])
1119 if (i->u.open.ret == fd) {
1120 assert((void *)i->cleanup
1121 == (void *)cleanup_open);
1132 locks = add_lock(locks, fd, 0, off_max(), F_UNLCK);
/* Zero length means "to end of file" */
static off_t end_of(off_t start, off_t len)
{
	/* struct flock uses l_len == 0 for "lock through EOF"; map that to
	 * our inclusive-end sentinel.  The visible body was missing this
	 * branch, which would have turned a whole-file lock into a
	 * one-byte-before-start (empty) range. */
	if (len == 0)
		return off_max();
	return start + len - 1;
}
1144 /* FIXME: This only handles locks, really. */
1145 int failtest_fcntl(int fd, const char *file, unsigned line, int cmd, ...)
1147 struct failtest_call *p;
1148 struct fcntl_call call;
1154 /* Argument extraction. */
1159 call.arg.l = va_arg(ap, long);
1161 return fcntl(fd, cmd, call.arg.l);
1164 return fcntl(fd, cmd);
1168 call.arg.fl = *va_arg(ap, struct flock *);
1170 return fcntl(fd, cmd, &call.arg.fl);
1174 call.arg.fl = *va_arg(ap, struct flock *);
1178 /* This means you need to implement it here. */
1179 err(1, "failtest: unknown fcntl %u", cmd);
1182 p = add_history(FAILTEST_FCNTL, file, line, &call);
1184 if (should_fail(p)) {
1185 p->u.fcntl.ret = -1;
1186 if (p->u.fcntl.cmd == F_SETLK)
1192 p->u.fcntl.ret = fcntl(p->u.fcntl.fd, p->u.fcntl.cmd,
1193 &p->u.fcntl.arg.fl);
1194 if (p->u.fcntl.ret == -1)
1197 /* We don't handle anything else yet. */
1198 assert(p->u.fcntl.arg.fl.l_whence == SEEK_SET);
1199 locks = add_lock(locks,
1201 p->u.fcntl.arg.fl.l_start,
1202 end_of(p->u.fcntl.arg.fl.l_start,
1203 p->u.fcntl.arg.fl.l_len),
1204 p->u.fcntl.arg.fl.l_type);
1208 return p->u.fcntl.ret;
1211 pid_t failtest_getpid(const char *file, unsigned line)
1213 /* You must call failtest_init first! */
1218 void failtest_init(int argc, char *argv[])
1222 orig_pid = getpid();
1224 warnfd = move_fd_to_high(dup(STDERR_FILENO));
1225 for (i = 1; i < argc; i++) {
1226 if (!strncmp(argv[i], "--failpath=", strlen("--failpath="))) {
1227 failpath = argv[i] + strlen("--failpath=");
1228 } else if (strcmp(argv[i], "--tracepath") == 0) {
1230 failtest_timeout_ms = -1;
1231 } else if (!strncmp(argv[i], "--debugpath=",
1232 strlen("--debugpath="))) {
1233 debugpath = argv[i] + strlen("--debugpath=");
1236 failtable_init(&failtable);
1240 bool failtest_has_failed(void)
1242 return control_fd != -1;
1245 void failtest_exit(int status)
1247 if (failtest_exit_check) {
1248 if (!failtest_exit_check(&history))
1249 child_fail(NULL, 0, "failtest_exit_check failed\n");
1252 failtest_cleanup(false, status);