10 #include <sys/types.h>
15 #include <ccan/read_write_all/read_write_all.h>
16 #include <ccan/failtest/failtest_proto.h>
17 #include <ccan/failtest/failtest.h>
18 #include <ccan/build_assert/build_assert.h>
/* NOTE(review): every line in this chunk carries a stray embedded line
 * number ("20 ", "21 ", ...) and many original lines are elided; the
 * numbers are extraction garbage, not code. */
/* User-overridable hook: decides, per recorded history, whether to fail. */
20 bool (*failtest_hook)(struct failtest_call *history, unsigned num)
21 = failtest_default_hook;
/* fd for --tracepath trace output; -1 when tracing is disabled. */
23 static int tracefd = -1;
/* Timeout (ms) the parent passes to poll() while waiting on a failure child. */
25 unsigned int failtest_timeout_ms = 20000;
39 /* end is inclusive: you can't have a 0-byte lock. */
/* Optional user callback run from failtest_exit() to validate history. */
44 bool (*failtest_exit_check)(struct failtest_call *history, unsigned num);
/* Growable array recording every intercepted call in this process. */
46 static struct failtest_call *history = NULL;
47 static unsigned int history_num = 0;
/* Pipe back to our parent when we are a forked failure child; -1 in root. */
48 static int control_fd = -1;
/* Wall-clock start time, set in failtest_init(), used for trace timestamps. */
49 static struct timeval start;
/* Writes reported by the failing child, checked against the parent's own. */
51 static struct write_call *child_writes = NULL;
52 static unsigned int child_writes_num = 0;
/* pid of the process currently holding the fcntl locks (0 = nobody yet). */
54 static pid_t lock_owner;
55 static struct lock_info *locks = NULL;
56 static unsigned int lock_num = 0;
/* One letter per failtest_call_type, used to build --failpath strings. */
58 static const char info_to_arg[] = "mceoprwf";
60 /* Dummy call used for failtest_undo wrappers. */
61 static struct failtest_call unrecorded_call;
/* Append one intercepted call to the global history array and return a
 * pointer to the new entry; suppressed calls (NULL file) get the shared
 * &unrecorded_call sentinel instead of being recorded. */
63 static struct failtest_call *add_history_(enum failtest_call_type type,
69 /* NULL file is how we suppress failure. */
71 return &unrecorded_call;
/* realloc-overwrite is acceptable here: this is a test harness and an
 * OOM in the harness itself is fatal anyway. */
73 history = realloc(history, (history_num + 1) * sizeof(*history));
74 history[history_num].type = type;
75 history[history_num].file = file;
76 history[history_num].line = line;
77 history[history_num].cleanup = NULL;
/* Copy the caller's per-type argument struct into the union. */
78 memcpy(&history[history_num].u, elem, elem_size);
79 return &history[history_num++];
/* Convenience wrapper: derives elem_size from the argument's type. */
82 #define add_history(type, file, line, elem) \
83 add_history_((type), (file), (line), (elem), sizeof(*(elem)))
/* Install a cleanup callback.  The (void)sizeof(clean((type *)NULL))
 * part is a compile-time check that `clean` really accepts a type*,
 * without ever calling it; the cast to void* erases the signature for
 * storage in the generic cleanup slot. */
85 #define set_cleanup(call, clean, type) \
86 (call)->cleanup = (void *)((void)sizeof(clean((type *)NULL)), (clean))
/* Default failtest_hook.  Body is elided in this chunk; presumably it
 * unconditionally allows the failure — TODO confirm against full source. */
88 bool failtest_default_hook(struct failtest_call *history, unsigned num)
/* Parent side: read one WRITE report (header struct, then the written
 * bytes) from the child's control pipe into child_writes[].
 * Returns false when read_all() fails (elided lines presumably return). */
93 static bool read_write_info(int fd)
98 /* We don't need all of this, but it's simple. */
99 child_writes = realloc(child_writes,
100 (child_writes_num+1) * sizeof(child_writes[0]));
101 w = &child_writes[child_writes_num];
102 if (!read_all(fd, w, sizeof(*w)))
/* w->buf pointed into the child's address space; replace it with our
 * own copy of the payload. */
105 w->buf = buf = malloc(w->count);
106 if (!read_all(fd, buf, w->count))
/* Build a malloc'd --failpath string: one letter per recorded call
 * (from info_to_arg[]), uppercased when that call was failed.
 * Caller owns (and must free) the result. */
113 static char *failpath_string(void)
116 char *ret = malloc(history_num + 1);
118 for (i = 0; i < history_num; i++) {
119 ret[i] = info_to_arg[history[i].type];
/* Uppercase marks "this call failed" (elided guard presumably tests
 * history[i].fail — TODO confirm). */
121 ret[i] = toupper(ret[i]);
/* Send an info_type message up the control pipe; silently a no-op in
 * the root process (control_fd == -1). */
127 static void tell_parent(enum info_type type)
129 if (control_fd != -1)
130 write_all(control_fd, &type, sizeof(type));
/* Report a child failure: print the formatted reason and the child's
 * captured output to stderr, print the reproduction failpath, notify
 * the parent, then (in elided lines) presumably exit. */
133 static void child_fail(const char *out, size_t outlen, const char *fmt, ...)
136 char *path = failpath_string();
139 vfprintf(stderr, fmt, ap);
/* Dump the child's captured stdout/stderr verbatim. */
142 fprintf(stderr, "%.*s", (int)outlen, out);
143 printf("To reproduce: --failpath=%s\n", path);
145 tell_parent(FAILURE);
/* SIGUSR1 handler installed in the parent while a child runs; body is
 * elided — presumably forwards the signal down to the child. */
151 static void hand_down(int signal)
/* Drop all recorded fcntl locks.  If this process owns them, unlock
 * each fd; otherwise ask our parent (which must own them) to do it via
 * a RELEASE_LOCKS message on the control pipe. */
156 static void release_locks(void)
158 /* Locks were never acquired/reacquired? */
162 /* We own them? Release them all. */
163 if (lock_owner == getpid()) {
167 fl.l_whence = SEEK_SET;
/* F_SETLK (non-blocking) suffices: we hold these locks, so unlocking
 * cannot block. */
171 for (i = 0; i < lock_num; i++)
172 fcntl(locks[i].fd, F_SETLK, &fl);
174 /* Our parent must have them; pass request up. */
175 enum info_type type = RELEASE_LOCKS;
176 assert(control_fd != -1);
177 write_all(control_fd, &type, sizeof(type));
/* off_t is a signed type.  Getting its max is non-trivial.
 *
 * Returns the maximum representable off_t, used elsewhere as the
 * "lock extends to end of file" sentinel (see get_locks() and
 * failtest_close()).
 *
 * Fix: both constants were one hex digit short — 0x7FFFFFF is 2^27-1
 * (not INT32_MAX) and 0x7FFFFFFFFFFFFFF is 2^59-1 (not INT64_MAX) —
 * so the sentinel never matched the true maximum offset. */
static off_t off_max(void)
{
	BUILD_ASSERT(sizeof(off_t) == 4 || sizeof(off_t) == 8);
	if (sizeof(off_t) == 4)
		return (off_t)0x7FFFFFFF;
	else
		return (off_t)0x7FFFFFFFFFFFFFFFULL;
}
/* (Re)acquire every recorded lock in this process.  If another process
 * currently owns them, first ask it (via the control pipe) to release,
 * then take each lock with blocking F_SETLKW and mark ourselves owner. */
192 static void get_locks(void)
/* Already ours: nothing to do. */
197 if (lock_owner == getpid())
200 if (lock_owner != 0) {
201 enum info_type type = RELEASE_LOCKS;
202 assert(control_fd != -1);
203 write_all(control_fd, &type, sizeof(type));
206 fl.l_whence = SEEK_SET;
208 for (i = 0; i < lock_num; i++) {
209 fl.l_type = locks[i].type;
210 fl.l_start = locks[i].start;
/* end == off_max() is the "to end of file" sentinel; the elided branch
 * presumably sets l_len = 0 (POSIX: lock to EOF). */
211 if (locks[i].end == off_max())
/* end is inclusive, hence the +1. */
214 fl.l_len = locks[i].end - locks[i].start + 1;
216 if (fcntl(locks[i].fd, F_SETLKW, &fl) != 0)
219 lock_owner = getpid();
/* Write a string to the trace fd, retrying on short/failed writes;
 * dies if the write cannot make progress (elided lines presumably
 * advance str/len and tolerate EINTR — TODO confirm). */
222 static void trace_str(const char *str)
226 while ((ret = write(tracefd, str, strlen(str))) <= 0) {
231 err(1, "Writing trace.");
/* The heart of failtest: decide whether this recorded call should fail.
 *
 * Root-process flow: consult --failpath (if given) or failtest_hook,
 * then fork a child which will fail the call; the parent captures the
 * child's output and control messages (WRITE reports, lock requests)
 * via two pipes, and propagates any child failure.  Returns true in the
 * child that must fail, false in the parent/continuing process. */
234 static bool should_fail(struct failtest_call *call)
237 int control[2], output[2];
238 enum info_type type = UNEXPECTED;
/* Suppressed calls never fail. */
242 if (call == &unrecorded_call)
246 /* + means continue after end, like normal. */
247 if (*failpath == '+')
/* Failpath letters must match the call sequence exactly. */
250 if (tolower(*failpath) != info_to_arg[call->type])
251 errx(1, "Failpath expected '%c' got '%c'\n",
252 info_to_arg[call->type], *failpath);
/* Uppercase letter in the failpath = fail this call. */
253 call->fail = isupper(*(failpath++));
/* Hook can veto failing this call. */
258 if (!failtest_hook(history, history_num)) {
263 /* We're going to fail in the child. */
265 if (pipe(control) != 0 || pipe(output) != 0)
266 err(1, "opening pipe");
268 /* Prevent double-printing (in child and parent) */
272 err(1, "forking failed");
/* --tracepath: emit "pid (elapsed): failpath (file:line)". */
278 gettimeofday(&now, NULL);
279 if (now.tv_usec < start.tv_usec) {
281 now.tv_usec += 1000000;
283 now.tv_usec -= start.tv_usec;
284 now.tv_sec -= start.tv_sec;
285 sprintf(str, "%u (%u.%02u): ", getpid(),
286 (int)now.tv_sec, (int)now.tv_usec / 10000);
288 p = failpath_string();
292 p = strchr(history[history_num-1].file, '/');
296 trace_str(history[history_num-1].file);
297 sprintf(str, ":%u)\n", history[history_num-1].line);
/* Child: redirect stdout/stderr into the output pipe so the parent
 * can capture (and suppress) them. */
302 dup2(output[1], STDOUT_FILENO);
303 dup2(output[1], STDERR_FILENO);
304 if (output[1] != STDOUT_FILENO && output[1] != STDERR_FILENO)
306 control_fd = control[1];
/* Parent: forward SIGUSR1 down to the running child. */
310 signal(SIGUSR1, hand_down);
315 /* We grab output so we can display it; we grab writes so we
318 struct pollfd pfd[2];
321 pfd[0].fd = output[0];
322 pfd[0].events = POLLIN|POLLHUP;
323 pfd[1].fd = control[0];
324 pfd[1].events = POLLIN|POLLHUP;
/* After SUCCESS only the output pipe is polled (1 fd vs 2). */
327 ret = poll(pfd, 1, failtest_timeout_ms);
329 ret = poll(pfd, 2, failtest_timeout_ms);
334 if (pfd[0].revents & POLLIN) {
337 out = realloc(out, outlen + 8192);
338 len = read(output[0], out + outlen, 8192);
340 } else if (type != SUCCESS && (pfd[1].revents & POLLIN)) {
341 if (read_all(control[0], &type, sizeof(type))) {
343 if (!read_write_info(control[0]))
345 } else if (type == RELEASE_LOCKS) {
347 /* FIXME: Tell them we're done... */
350 } else if (pfd[0].revents & POLLHUP) {
353 } while (type != FAILURE);
357 waitpid(child, &status, 0);
358 if (!WIFEXITED(status))
359 child_fail(out, outlen, "Killed by signal %u: ",
361 /* Child printed failure already, just pass up exit code. */
362 if (type == FAILURE) {
363 fprintf(stderr, "%.*s", (int)outlen, out);
365 exit(WEXITSTATUS(status) ? WEXITSTATUS(status) : 1);
367 if (WEXITSTATUS(status) != 0)
368 child_fail(out, outlen, "Exited with status %i: ",
369 WEXITSTATUS(status));
372 signal(SIGUSR1, SIG_DFL);
374 /* We continue onwards without failing. */
/* Undo a recorded calloc (body elided; presumably frees call->ret). */
379 static void cleanup_calloc(struct calloc_call *call)
/* calloc() wrapper: record the call, then either fail (returning NULL,
 * with errno presumably set to ENOMEM in an elided line) or do the real
 * allocation and register a cleanup for the undo pass. */
384 void *failtest_calloc(size_t nmemb, size_t size,
385 const char *file, unsigned line)
387 struct failtest_call *p;
388 struct calloc_call call;
391 p = add_history(FAILTEST_CALLOC, file, line, &call);
393 if (should_fail(p)) {
394 p->u.calloc.ret = NULL;
397 p->u.calloc.ret = calloc(nmemb, size);
398 set_cleanup(p, cleanup_calloc, struct calloc_call);
401 return p->u.calloc.ret;
/* Undo a recorded malloc (body elided; presumably frees call->ret). */
404 static void cleanup_malloc(struct malloc_call *call)
409 void *failtest_malloc(size_t size, const char *file, unsigned line)
411 struct failtest_call *p;
412 struct malloc_call call;
415 p = add_history(FAILTEST_MALLOC, file, line, &call);
416 if (should_fail(p)) {
417 p->u.calloc.ret = NULL;
420 p->u.calloc.ret = malloc(size);
421 set_cleanup(p, cleanup_malloc, struct malloc_call);
424 return p->u.calloc.ret;
/* Undo a recorded realloc (body elided; presumably frees call->ret). */
427 static void cleanup_realloc(struct realloc_call *call)
432 /* Walk back and find out if we got this ptr from a previous routine. */
/* Clears the cleanup callback on the allocation entry that produced
 * `ptr`, so a later undo pass won't double-free memory the caller has
 * since realloc'd/freed.  Scans newest-first and (in elided lines)
 * presumably stops at the first match.
 * NOTE(review): `i`'s declaration is elided — it must be a signed type
 * for the `i >= 0` countdown to terminate; confirm in full source. */
433 static void fixup_ptr_history(void *ptr, unsigned int last)
437 /* Start at end of history, work back. */
438 for (i = last - 1; i >= 0; i--) {
439 switch (history[i].type) {
440 case FAILTEST_REALLOC:
441 if (history[i].u.realloc.ret == ptr) {
442 history[i].cleanup = NULL;
446 case FAILTEST_MALLOC:
447 if (history[i].u.malloc.ret == ptr) {
448 history[i].cleanup = NULL;
452 case FAILTEST_CALLOC:
453 if (history[i].u.calloc.ret == ptr) {
454 history[i].cleanup = NULL;
/* realloc() wrapper: record the call, then either fail (NULL) or
 * perform the real realloc.  The old pointer's history entry loses its
 * cleanup first, since realloc may free/move that allocation. */
464 void *failtest_realloc(void *ptr, size_t size, const char *file, unsigned line)
466 struct failtest_call *p;
467 struct realloc_call call;
469 p = add_history(FAILTEST_REALLOC, file, line, &call);
471 /* FIXME: Try one child moving allocation, one not. */
472 if (should_fail(p)) {
473 p->u.realloc.ret = NULL;
/* history_num-1 excludes the entry we just added for this call. */
476 fixup_ptr_history(ptr, history_num-1);
477 p->u.realloc.ret = realloc(ptr, size);
478 set_cleanup(p, cleanup_realloc, struct realloc_call);
481 return p->u.realloc.ret;
/* free() wrapper: detach any recorded cleanup for this pointer (to
 * avoid a double free in the undo pass); the real free() is presumably
 * in an elided line. */
484 void failtest_free(void *ptr)
486 fixup_ptr_history(ptr, history_num);
/* Undo a recorded open (body elided; presumably closes call->ret). */
490 static void cleanup_open(struct open_call *call)
/* open() wrapper: extract the variadic flags/mode, record the call,
 * then either fail (-1, errno presumably set in elided lines) or do the
 * real open and remember the fd for later restore via dup_fd. */
495 int failtest_open(const char *pathname,
496 const char *file, unsigned line, ...)
498 struct failtest_call *p;
499 struct open_call call;
/* Duplicated so the history entry outlives the caller's string. */
502 call.pathname = strdup(pathname);
504 call.flags = va_arg(ap, int);
505 if (call.flags & O_CREAT) {
/* NOTE(review): va_arg(ap, mode_t) is undefined if mode_t is narrower
 * than int (default argument promotions) — many platforms require
 * va_arg(ap, int) here; confirm target ABI. */
506 call.mode = va_arg(ap, mode_t);
509 p = add_history(FAILTEST_OPEN, file, line, &call);
510 /* Avoid memory leak! */
511 if (p == &unrecorded_call)
512 free((char *)call.pathname);
513 if (should_fail(p)) {
515 /* FIXME: Play with error codes? */
518 p->u.open.ret = open(pathname, call.flags, call.mode);
519 set_cleanup(p, cleanup_open, struct open_call);
520 p->u.open.dup_fd = p->u.open.ret;
523 return p->u.open.ret;
/* Undo a recorded pipe: close whichever ends the program hasn't already
 * closed itself (close() calls presumably in elided lines). */
526 static void cleanup_pipe(struct pipe_call *call)
528 if (!call->closed[0])
530 if (!call->closed[1])
/* pipe() wrapper: record the call, fail (-1) or create a real pipe and
 * track both fds so failtest_close() can mark them closed. */
534 int failtest_pipe(int pipefd[2], const char *file, unsigned line)
536 struct failtest_call *p;
537 struct pipe_call call;
539 p = add_history(FAILTEST_PIPE, file, line, &call);
540 if (should_fail(p)) {
542 /* FIXME: Play with error codes? */
545 p->u.pipe.ret = pipe(p->u.pipe.fds);
546 p->u.pipe.closed[0] = p->u.pipe.closed[1] = false;
547 set_cleanup(p, cleanup_pipe, struct pipe_call);
549 /* This causes valgrind to notice if they use pipefd[] after failure */
550 memcpy(pipefd, p->u.pipe.fds, sizeof(p->u.pipe.fds));
552 return p->u.pipe.ret;
/* Undo a recorded read: rewind the fd to where the read started. */
555 static void cleanup_read(struct read_call *call)
557 lseek(call->fd, call->off, SEEK_SET);
/* pread() wrapper: record the call, then either fail (-1, errno
 * presumably set in elided lines) or do the real pread and register a
 * cleanup that rewinds the file offset. */
560 ssize_t failtest_pread(int fd, void *buf, size_t count, off_t off,
561 const char *file, unsigned line)
563 struct failtest_call *p;
564 struct read_call call;
569 p = add_history(FAILTEST_READ, file, line, &call);
571 /* FIXME: Try partial read returns. */
572 if (should_fail(p)) {
576 p->u.read.ret = pread(fd, buf, count, off);
577 set_cleanup(p, cleanup_read, struct read_call);
580 return p->u.read.ret;
/* Undo a recorded write: restore the saved pre-write bytes through the
 * duplicated fd, truncate back to the old length, and free the save
 * buffer.  Uses dup_fd because the original fd may have been closed. */
583 static void cleanup_write(struct write_call *call)
585 lseek(call->dup_fd, call->off, SEEK_SET);
586 write(call->dup_fd, call->saved_contents, call->saved_len);
587 lseek(call->dup_fd, call->off, SEEK_SET);
588 ftruncate(call->dup_fd, call->old_filelen);
589 free(call->saved_contents);
/* pwrite() wrapper.  For seekable fds (off != -1) it first snapshots
 * the bytes about to be overwritten so cleanup_write() can undo the
 * write.  If we are a failure child, the write is reported to the
 * parent; if we are the parent replaying after a child, the write is
 * checked against (and consumed from) the child's reported writes. */
592 ssize_t failtest_pwrite(int fd, const void *buf, size_t count, off_t off,
593 const char *file, unsigned line)
595 struct failtest_call *p;
596 struct write_call call;
598 call.fd = call.dup_fd = fd;
602 p = add_history(FAILTEST_WRITE, file, line, &call);
604 /* Save old contents if we can */
/* off == -1 marks a non-seekable fd (pipe/socket): nothing to save. */
605 if (p->u.write.off != -1) {
607 p->u.write.old_filelen = lseek(fd, 0, SEEK_END);
609 /* Write past end of file? Nothing to save.*/
610 if (p->u.write.old_filelen <= p->u.write.off)
611 p->u.write.saved_len = 0;
612 /* Write which goes over end of file? Partial save. */
613 else if (p->u.write.off + count > p->u.write.old_filelen)
614 p->u.write.saved_len = p->u.write.old_filelen
618 p->u.write.saved_len = count;
620 p->u.write.saved_contents = malloc(p->u.write.saved_len);
621 lseek(fd, p->u.write.off, SEEK_SET);
622 ret = read(fd, p->u.write.saved_contents, p->u.write.saved_len);
623 if (ret != p->u.write.saved_len)
624 err(1, "Expected %i bytes, got %i",
625 (int)p->u.write.saved_len, (int)ret);
/* Restore the offset the write will use. */
626 lseek(fd, p->u.write.off, SEEK_SET);
627 set_cleanup(p, cleanup_write, struct write_call);
630 /* If we're a child, tell parent about write. */
631 if (control_fd != -1) {
632 enum info_type type = WRITE;
634 write_all(control_fd, &type, sizeof(type));
635 write_all(control_fd, &p->u.write, sizeof(p->u.write));
636 write_all(control_fd, buf, count);
639 /* FIXME: Try partial write returns. */
640 if (should_fail(p)) {
644 /* FIXME: We assume same write order in parent and child */
/* Parent replaying: the child must have issued this exact write. */
645 if (child_writes_num != 0) {
646 if (child_writes[0].fd != fd)
647 errx(1, "Child wrote to fd %u, not %u?",
648 child_writes[0].fd, fd);
649 if (child_writes[0].off != p->u.write.off)
650 errx(1, "Child wrote to offset %zu, not %zu?",
651 (size_t)child_writes[0].off,
652 (size_t)p->u.write.off);
653 if (child_writes[0].count != count)
654 errx(1, "Child wrote length %zu, not %zu?",
655 child_writes[0].count, count);
656 if (memcmp(child_writes[0].buf, buf, count)) {
658 "Child wrote differently to"
659 " fd %u than we did!\n", fd);
/* Consume the matched entry (child_writes_num presumably decremented
 * in an elided line before this memmove). */
661 free((char *)child_writes[0].buf);
663 memmove(&child_writes[0], &child_writes[1],
664 sizeof(child_writes[0]) * child_writes_num);
666 /* Is this is a socket or pipe, child wrote it
668 if (p->u.write.off == (off_t)-1) {
669 p->u.write.ret = count;
671 return p->u.write.ret;
674 p->u.write.ret = pwrite(fd, buf, count, off);
677 return p->u.write.ret;
/* read() wrapper: delegate to failtest_pread at the fd's current
 * offset (trailing arguments presumably on an elided line). */
680 ssize_t failtest_read(int fd, void *buf, size_t count,
681 const char *file, unsigned line)
683 return failtest_pread(fd, buf, count, lseek(fd, 0, SEEK_CUR),
/* write() wrapper: delegate to failtest_pwrite at the fd's current
 * offset (trailing arguments presumably on an elided line). */
687 ssize_t failtest_write(int fd, const void *buf, size_t count,
688 const char *file, unsigned line)
690 return failtest_pwrite(fd, buf, count, lseek(fd, 0, SEEK_CUR),
/* Merge a new lock (or unlock) of [start,end] on fd into the locks[]
 * array: existing overlapping entries are trimmed, split, or removed,
 * then a non-F_UNLCK request is appended as a fresh entry.  Returns the
 * (possibly realloc'd) array; WARN_UNUSED_RESULT forces callers to
 * reassign `locks`.  `end` is inclusive (see comment near top of file). */
694 static struct lock_info *WARN_UNUSED_RESULT
695 add_lock(struct lock_info *locks, int fd, off_t start, off_t end, int type)
700 for (i = 0; i < lock_num; i++) {
705 /* Four cases we care about:
/* New range strictly inside the old one: split the old entry in two,
 * recursing to add the right-hand remainder. */
719 if (start > l->start && end < l->end) {
720 /* Mid overlap: trim entry, add new one. */
721 off_t new_start, new_end;
725 locks = add_lock(locks,
726 fd, new_start, new_end, l->type);
728 } else if (start <= l->start && end >= l->end) {
729 /* Total overlap: eliminate entry. */
732 } else if (end >= l->start && end < l->end) {
733 /* Start overlap: trim entry. */
735 } else if (start > l->start && start <= l->end) {
736 /* End overlap: trim entry. */
739 /* Nothing left? Remove it. */
740 if (l->end < l->start) {
741 memmove(l, l + 1, (--lock_num - i) * sizeof(l[0]));
746 if (type != F_UNLCK) {
747 locks = realloc(locks, (lock_num + 1) * sizeof(*locks));
748 l = &locks[lock_num++];
757 /* We only trap this so we can dup fds in case we need to restore. */
/* Walk the history newest-first: dup the fd when a pending write
 * cleanup will need it, drop read cleanups, mark pipe ends closed, and
 * record the dup on open entries.  Also drops any locks on the fd,
 * since closing an fd releases its POSIX record locks. */
758 int failtest_close(int fd)
765 /* Trace history to find source of fd, and if we need to cleanup writes. */
/* NOTE(review): `i` must be signed for this countdown — declaration is
 * elided; confirm in full source. */
766 for (i = history_num-1; i >= 0; i--) {
767 switch (history[i].type) {
769 if (history[i].u.write.fd != fd)
771 if (!history[i].cleanup)
773 /* We need to save fd so we can restore file. */
776 history[i].u.write.dup_fd = new_fd;
779 /* We don't need to cleanup reads on closed fds. */
780 if (history[i].u.read.fd != fd)
782 history[i].cleanup = NULL;
785 /* From a pipe? We don't ever restore pipes... */
786 if (history[i].u.pipe.fds[0] == fd) {
787 assert(new_fd == -1);
788 history[i].u.pipe.closed[0] = true;
791 if (history[i].u.pipe.fds[1] == fd) {
792 assert(new_fd == -1);
793 history[i].u.pipe.closed[1] = true;
798 if (history[i].u.open.ret == fd) {
799 history[i].u.open.dup_fd = new_fd;
/* close() drops the process's record locks on this fd. */
809 locks = add_lock(locks, fd, 0, off_max(), F_UNLCK);
813 /* Zero length means "to end of file" */
/* Convert an fcntl (start, len) pair to an inclusive end offset; the
 * elided len==0 branch presumably returns off_max(). */
814 static off_t end_of(off_t start, off_t len)
818 return start + len - 1;
821 /* FIXME: This only handles locks, really. */
/* fcntl() wrapper: non-lock commands are extracted from the varargs and
 * passed straight through; lock commands are recorded, may be failed,
 * and on success update the locks[] bookkeeping via add_lock(). */
822 int failtest_fcntl(int fd, const char *file, unsigned line, int cmd, ...)
824 struct failtest_call *p;
825 struct fcntl_call call;
831 /* Argument extraction. */
/* Long-argument commands: pass through untracked. */
836 call.arg.l = va_arg(ap, long);
838 return fcntl(fd, cmd, call.arg.l);
/* No-argument commands: pass through untracked. */
841 return fcntl(fd, cmd);
/* F_GETLK-style query: copy caller's flock, pass through. */
845 call.arg.fl = *va_arg(ap, struct flock *);
847 return fcntl(fd, cmd, &call.arg.fl);
/* Lock-setting commands fall through to be recorded below. */
851 call.arg.fl = *va_arg(ap, struct flock *);
855 /* This means you need to implement it here. */
/* NOTE(review): err() appends strerror(errno), which is meaningless
 * here — errx() would be the better fit. */
856 err(1, "failtest: unknown fcntl %u", cmd);
859 p = add_history(FAILTEST_FCNTL, file, line, &call);
862 if (should_fail(p)) {
864 if (p->u.fcntl.cmd == F_SETLK)
869 p->u.fcntl.ret = fcntl(p->u.fcntl.fd, p->u.fcntl.cmd,
871 if (p->u.fcntl.ret == -1)
874 /* We don't handle anything else yet. */
875 assert(p->u.fcntl.arg.fl.l_whence == SEEK_SET);
/* Record the successful lock change in our bookkeeping. */
876 locks = add_lock(locks,
878 p->u.fcntl.arg.fl.l_start,
879 end_of(p->u.fcntl.arg.fl.l_start,
880 p->u.fcntl.arg.fl.l_len),
881 p->u.fcntl.arg.fl.l_type);
885 return p->u.fcntl.ret;
/* Parse failtest's own command-line options (--failpath=..., and
 * --tracepath which traces to a dup of stderr) and record the start
 * time for trace timestamps. */
888 void failtest_init(int argc, char *argv[])
892 for (i = 1; i < argc; i++) {
893 if (!strncmp(argv[i], "--failpath=", strlen("--failpath="))) {
894 failpath = argv[i] + strlen("--failpath=");
895 } else if (strcmp(argv[i], "--tracepath") == 0) {
896 tracefd = dup(STDERR_FILENO);
/* -1 wraps to UINT_MAX in this unsigned; presumably intended to reach
 * poll() as an infinite timeout when tracing — confirm conversion. */
897 failtest_timeout_ms = -1;
900 gettimeofday(&start, NULL);
903 /* Free up memory, so valgrind doesn't report leaks. */
904 static void free_everything(void)
908 /* We don't do this in cleanup: needed even for failed opens. */
909 for (i = 0; i < history_num; i++) {
/* pathname was strdup'd in failtest_open(). */
910 if (history[i].type == FAILTEST_OPEN)
911 free((char *)history[i].u.open.pathname);
/* Exit path for failure children: run the user's exit check, undo every
 * recorded call's side effects in reverse order, and tell the parent we
 * completed successfully.  The root process (control_fd == -1) takes
 * the elided early branch instead. */
916 void failtest_exit(int status)
920 if (control_fd == -1) {
925 if (failtest_exit_check) {
926 if (!failtest_exit_check(history, history_num))
927 child_fail(NULL, 0, "failtest_exit_check failed\n");
930 /* Cleanup everything, in reverse order. */
/* NOTE(review): `i` must be signed for this countdown — declaration is
 * elided; confirm in full source. */
931 for (i = history_num - 1; i >= 0; i--)
932 if (history[i].cleanup)
933 history[i].cleanup(&history[i].u);
936 tell_parent(SUCCESS);