/* ccan/failtest/failtest.c */
1 /* Licensed under LGPL - see LICENSE file for details */
2 #include <ccan/failtest/failtest.h>
3 #include <stdarg.h>
4 #include <string.h>
5 #include <stdio.h>
6 #include <stdarg.h>
7 #include <ctype.h>
8 #include <err.h>
9 #include <unistd.h>
10 #include <poll.h>
11 #include <errno.h>
12 #include <sys/types.h>
13 #include <sys/wait.h>
14 #include <sys/stat.h>
15 #include <sys/time.h>
16 #include <sys/mman.h>
17 #include <sys/resource.h>
18 #include <signal.h>
19 #include <assert.h>
20 #include <ccan/time/time.h>
21 #include <ccan/read_write_all/read_write_all.h>
22 #include <ccan/failtest/failtest_proto.h>
23 #include <ccan/build_assert/build_assert.h>
24 #include <ccan/hash/hash.h>
25 #include <ccan/htable/htable_type.h>
26 #include <ccan/str/str.h>
27 #include <ccan/compiler/compiler.h>
28
/* Optional user hook, consulted before each potential failure injection. */
enum failtest_result (*failtest_hook)(struct tlist_calls *);

/* Trace stream (NULL when --trace not in use) and warning stream. */
static FILE *tracef = NULL, *warnf;
/* Nesting depth of forked children; used to indent trace output. */
static int traceindent = 0;

/* How long to wait for a child before signalling it (milliseconds). */
unsigned int failtest_timeout_ms = 20000;

/* --failpath / --debugpath command-line values (NULL when unset). */
const char *failpath;
const char *debugpath;
38
/* Message types a child sends its parent over the control pipe. */
enum info_type {
	WRITE,		/* Followed by a struct write_call plus its data. */
	RELEASE_LOCKS,	/* Child needs our fcntl locks released. */
	FAILURE,	/* Child finished; the test failed. */
	SUCCESS,	/* Child finished; the test passed. */
	UNEXPECTED	/* Initial state: no message received yet. */
};
46
/* One fcntl() byte-range lock we are tracking for handoff between
 * parent and child processes. */
struct lock_info {
	int fd;
	/* end is inclusive: you can't have a 0-byte lock. */
	off_t start, end;
	int type;	/* fcntl l_type value for this lock. */
};
53
/* We hash the call location together with its backtrace. */
static size_t hash_call(const struct failtest_call *call)
{
	/* Fold type, backtrace array, line and filename into one value;
	 * collisions are resolved by call_eq below. */
	return hash(call->file, strlen(call->file),
		    hash(&call->line, 1,
			 hash(call->backtrace, call->backtrace_num,
			      call->type)));
}
62
63 static bool call_eq(const struct failtest_call *call1,
64                     const struct failtest_call *call2)
65 {
66         unsigned int i;
67
68         if (strcmp(call1->file, call2->file) != 0
69             || call1->line != call2->line
70             || call1->type != call2->type
71             || call1->backtrace_num != call2->backtrace_num)
72                 return false;
73
74         for (i = 0; i < call1->backtrace_num; i++)
75                 if (call1->backtrace[i] != call2->backtrace[i])
76                         return false;
77
78         return true;
79 }
80
/* Defines struct failtable. */
HTABLE_DEFINE_TYPE(struct failtest_call, (struct failtest_call *), hash_call,
                   call_eq, failtable);

/* Optional user callback: decides overall pass/fail at exit time. */
bool (*failtest_exit_check)(struct tlist_calls *history);

/* The entire history of all calls. */
static struct tlist_calls history = TLIST_INIT(history);
/* If we're a child, the fd to write control info to the parent. */
static int control_fd = -1;
/* If we're a child, this is the first call we did ourselves. */
static struct failtest_call *our_history_start = NULL;
/* For printing runtime with --trace. */
static struct timeval start;
/* Set when failtest_hook returns FAIL_PROBE */
static bool probing = false;
/* Table to track duplicates. */
static struct failtable failtable;

/* Array of writes which our child did.  We report them on failure. */
static struct write_call *child_writes = NULL;
static unsigned int child_writes_num = 0;

/* fcntl locking info. */
static pid_t lock_owner;	/* pid currently holding the locks; 0 = nobody. */
static struct lock_info *locks = NULL;
static unsigned int lock_num = 0;

/* Our original pid, which we return to anyone who asks. */
static pid_t orig_pid;

/* Mapping from failtest_type to char. */
static const char info_to_arg[] = "mceoxprwfal";

/* Dummy call used for failtest_undo wrappers. */
static struct failtest_call unrecorded_call;

/* Snapshot of a byte range of a file, used to undo writes. */
struct contents_saved {
	size_t count;		/* Bytes actually captured in contents[]. */
	off_t off;		/* File offset the snapshot starts at. */
	off_t old_len;		/* Whole-file length at snapshot time. */
	char contents[1];	/* count bytes follow the header. */
};

/* File contents, saved in this child only. */
struct saved_mmapped_file {
	struct saved_mmapped_file *next;
	struct failtest_call *opener;	/* The FAILTEST_OPEN that owns the fd. */
	struct contents_saved *s;
};

/* Head of this child's list of mmap snapshots. */
static struct saved_mmapped_file *saved_mmapped_files;
133
134 #if HAVE_BACKTRACE
135 #include <execinfo.h>
136
/* Capture the current backtrace into a malloc'd array.
 * Sets *num to the number of frames; returns NULL (with *num == 0)
 * if memory cannot be allocated.  Caller frees the result.
 * The buffer doubles until the whole trace fits. */
static void **get_backtrace(unsigned int *num)
{
	static unsigned int max_back = 100;
	void **ret;

again:
	ret = malloc(max_back * sizeof(void *));
	if (!ret) {
		/* Fix: don't hand backtrace() a NULL buffer on OOM;
		 * an empty trace just means coarser duplicate detection. */
		*num = 0;
		return NULL;
	}
	*num = backtrace(ret, max_back);
	if (*num == max_back) {
		/* Possibly truncated: retry with a bigger buffer. */
		free(ret);
		max_back *= 2;
		goto again;
	}
	return ret;
}
152 #else
/* Fallback when backtrace() is unavailable: every call site with the
 * same file/line/type is treated as identical.  Slightly less thorough
 * testing, but slightly faster! */
static void **get_backtrace(unsigned int *num)
{
	/* Report an empty trace. */
	*num = 0;
	return NULL;
}
160 #endif /* HAVE_BACKTRACE */
161
162 static struct failtest_call *add_history_(enum failtest_call_type type,
163                                           bool can_leak,
164                                           const char *file,
165                                           unsigned int line,
166                                           const void *elem,
167                                           size_t elem_size)
168 {
169         struct failtest_call *call;
170
171         /* NULL file is how we suppress failure. */
172         if (!file)
173                 return &unrecorded_call;
174
175         call = malloc(sizeof *call);
176         call->type = type;
177         call->can_leak = can_leak;
178         call->file = file;
179         call->line = line;
180         call->cleanup = NULL;
181         call->backtrace = get_backtrace(&call->backtrace_num);
182         memcpy(&call->u, elem, elem_size);
183         tlist_add_tail(&history, call, list);
184         return call;
185 }
186
/* Record a call of TYPE at file:line, copying *elem into history.u. */
#define add_history(type, can_leak, file, line, elem)		\
	add_history_((type), (can_leak), (file), (line), (elem), sizeof(*(elem)))

/* We do a fake call inside a sizeof(), to check types. */
#define set_cleanup(call, clean, type)			\
	(call)->cleanup = (void *)((void)sizeof(clean((type *)NULL, false),1), (clean))
193
/* Dup the fd to a high value (out of the way I hope!), and close the old fd.
 * Returns the new fd, or the original fd if no free high slot was found. */
static int move_fd_to_high(int fd)
{
	int i;
	struct rlimit lim;
	int max;

	/* Search ceiling: the soft fd limit, or FD_SETSIZE as a fallback. */
	if (getrlimit(RLIMIT_NOFILE, &lim) == 0)
		max = lim.rlim_cur;
	else
		max = FD_SETSIZE;

	/* Fix: removed stray debug printf ("Max is %i") which polluted
	 * the test program's stdout. */
	for (i = max - 1; i > fd; i--) {
		/* EBADF from F_GETFL means descriptor i is free. */
		if (fcntl(i, F_GETFL) == -1 && errno == EBADF) {
			if (dup2(fd, i) == -1) {
				warn("Failed to dup fd %i to %i", fd, i);
				continue;
			}
			close(fd);
			return i;
		}
	}
	/* Nothing?  Really?  Er... ok? */
	return fd;
}
220
221 static bool read_write_info(int fd)
222 {
223         struct write_call *w;
224         char *buf;
225
226         /* We don't need all of this, but it's simple. */
227         child_writes = realloc(child_writes,
228                                (child_writes_num+1) * sizeof(child_writes[0]));
229         w = &child_writes[child_writes_num];
230         if (!read_all(fd, w, sizeof(*w)))
231                 return false;
232
233         w->buf = buf = malloc(w->count);
234         if (!read_all(fd, buf, w->count))
235                 return false;
236
237         child_writes_num++;
238         return true;
239 }
240
241 static char *failpath_string(void)
242 {
243         struct failtest_call *i;
244         char *ret = strdup("");
245         unsigned len = 0;
246
247         /* Inefficient, but who cares? */
248         tlist_for_each(&history, i, list) {
249                 ret = realloc(ret, len + 2);
250                 ret[len] = info_to_arg[i->type];
251                 if (i->fail)
252                         ret[len] = toupper(ret[len]);
253                 ret[++len] = '\0';
254         }
255         return ret;
256 }
257
258 static void do_warn(int e, const char *fmt, va_list ap)
259 {
260         char *p = failpath_string();
261
262         vfprintf(warnf, fmt, ap);
263         if (e != -1)
264                 fprintf(warnf, ": %s", strerror(e));
265         fprintf(warnf, " [%s]\n", p);
266         free(p);
267 }
268
/* warn()-alike: message plus errno string plus current failpath. */
static void fwarn(const char *fmt, ...)
{
	/* Capture errno before any call here can clobber it. */
	int saved_errno = errno;
	va_list ap;

	va_start(ap, fmt);
	do_warn(saved_errno, fmt, ap);
	va_end(ap);
}
278
279
/* warnx()-alike: message plus current failpath, no errno string. */
static void fwarnx(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	/* -1 tells do_warn there is no errno to report. */
	do_warn(-1, fmt, ap);
	va_end(ap);
}
288
289 static void tell_parent(enum info_type type)
290 {
291         if (control_fd != -1)
292                 write_all(control_fd, &type, sizeof(type));
293 }
294
295 static void child_fail(const char *out, size_t outlen, const char *fmt, ...)
296 {
297         va_list ap;
298         char *path = failpath_string();
299
300         va_start(ap, fmt);
301         vfprintf(stderr, fmt, ap);
302         va_end(ap);
303
304         fprintf(stderr, "%.*s", (int)outlen, out);
305         printf("To reproduce: --failpath=%s\n", path);
306         free(path);
307         tell_parent(FAILURE);
308         exit(1);
309 }
310
/* Write one line to the trace file (no-op unless --trace is active).
 * Each line is numbered, tagged with our pid and current failpath, and
 * indented one level per fork depth. */
static void PRINTF_FMT(1, 2) trace(const char *fmt, ...)
{
	va_list ap;
	unsigned int i;
	char *p;
	static int idx;	/* Monotonic line counter across all trace calls. */

	if (!tracef)
		return;

	/* traceindent is bumped once per forked child. */
	for (i = 0; i < traceindent; i++)
		fprintf(tracef, "  ");

	p = failpath_string();
	fprintf(tracef, "%i: %u: %s ", idx++, getpid(), p);
	va_start(ap, fmt);
	vfprintf(tracef, fmt, ap);
	va_end(ap);
	free(p);
}
331
/* The child we are currently waiting on (parent side only). */
static pid_t child;

/* Signal handler: forward the signal (e.g. timeout SIGUSR1) to the child. */
static void hand_down(int signum)
{
	kill(child, signum);
}
338
/* Give up the fcntl locks so another process in the test hierarchy can
 * take them.  If an ancestor holds them, relay the request upward. */
static void release_locks(void)
{
	/* Locks were never acquired/reacquired? */
	if (lock_owner == 0)
		return;

	/* We own them?  Release them all. */
	if (lock_owner == getpid()) {
		unsigned int i;
		struct flock fl;
		fl.l_type = F_UNLCK;
		fl.l_whence = SEEK_SET;
		fl.l_start = 0;
		fl.l_len = 0;	/* l_len == 0 means "to end of file". */

		trace("Releasing %u locks\n", lock_num);
		for (i = 0; i < lock_num; i++)
			fcntl(locks[i].fd, F_SETLK, &fl);
	} else {
		/* Our parent must have them; pass request up. */
		enum info_type type = RELEASE_LOCKS;
		assert(control_fd != -1);
		write_all(control_fd, &type, sizeof(type));
	}
	lock_owner = 0;
}
365
/* off_t is a signed type.  Getting its max is non-trivial.
 * Used as the sentinel for "lock extends to end of file". */
static off_t off_max(void)
{
	BUILD_ASSERT(sizeof(off_t) == 4 || sizeof(off_t) == 8);
	/* Fix: the old constants were each one hex digit short
	 * (0x7FFFFFF / 0x7FFFFFFFFFFFFFF), i.e. 1/16th of the real max. */
	if (sizeof(off_t) == 4)
		return (off_t)0x7FFFFFFF;
	else
		return (off_t)0x7FFFFFFFFFFFFFFFULL;
}
375
/* (Re)acquire every recorded lock in this process, asking the current
 * owner (an ancestor) to drop them first if necessary. */
static void get_locks(void)
{
	unsigned int i;
	struct flock fl;

	/* Already ours: nothing to do. */
	if (lock_owner == getpid())
		return;

	if (lock_owner != 0) {
		/* NOTE(review): no ack is awaited here; we rely on the
		 * blocking F_SETLKW below to serialize the handoff. */
		enum info_type type = RELEASE_LOCKS;
		assert(control_fd != -1);
		trace("Asking parent to release locks\n");
		write_all(control_fd, &type, sizeof(type));
	}

	fl.l_whence = SEEK_SET;

	for (i = 0; i < lock_num; i++) {
		fl.l_type = locks[i].type;
		fl.l_start = locks[i].start;
		/* end == off_max() encodes "to end of file" (l_len 0). */
		if (locks[i].end == off_max())
			fl.l_len = 0;
		else
			fl.l_len = locks[i].end - locks[i].start + 1;

		/* Blocking acquire; failure here is unrecoverable. */
		if (fcntl(locks[i].fd, F_SETLKW, &fl) != 0)
			abort();
	}
	trace("Acquired %u locks\n", lock_num);
	lock_owner = getpid();
}
407
408
/* Snapshot count bytes of fd at offset off, plus the file's total
 * length, so a child's writes can be undone later.  The fd's current
 * offset is preserved.  Caller frees the result. */
static struct contents_saved *save_contents(const char *filename,
					    int fd, size_t count, off_t off,
					    const char *why)
{
	/* NOTE(review): malloc result is unchecked — OOM crashes here. */
	struct contents_saved *s = malloc(sizeof(*s) + count);
	ssize_t ret;

	s->off = off;

	ret = pread(fd, s->contents, count, off);
	if (ret < 0) {
		fwarn("failtest_write: failed to save old contents!");
		s->count = 0;
	} else
		s->count = ret;	/* May be a short read near EOF. */

	/* Use lseek to get the size of file, but we have to restore
	 * file offset */
	off = lseek(fd, 0, SEEK_CUR);
	s->old_len = lseek(fd, 0, SEEK_END);
	lseek(fd, off, SEEK_SET);

	trace("Saving %p %s %zu@%llu after %s (filelength %llu) via fd %i\n",
	      s, filename, s->count, (long long)s->off, why,
	      (long long)s->old_len, fd);
	return s;
}
436
/* Write a saved snapshot back into its file, undoing a child's writes.
 * Reopens the file (at a high fd) if the original fd was closed.
 * @restore_offset: also reset the fd's file offset to the snapshot's. */
static void restore_contents(struct failtest_call *opener,
			     struct contents_saved *s,
			     bool restore_offset,
			     const char *caller)
{
	int fd;

	/* The top parent doesn't need to restore. */
	if (control_fd == -1)
		return;

	/* Has the fd been closed? */
	if (opener->u.open.closed) {
		/* Reopen, replace fd, close silently as we clean up. */
		fd = open(opener->u.open.pathname, O_RDWR);
		if (fd < 0) {
			fwarn("failtest: could not reopen %s to clean up %s!",
			      opener->u.open.pathname, caller);
			return;
		}
		/* Make it clearly distinguisable from a "normal" fd. */
		fd = move_fd_to_high(fd);
		trace("Reopening %s to restore it (was fd %i, now %i)\n",
		      opener->u.open.pathname, opener->u.open.ret, fd);
		opener->u.open.ret = fd;
		opener->u.open.closed = false;
	}
	fd = opener->u.open.ret;

	trace("Restoring %p %s %zu@%llu after %s (filelength %llu) via fd %i\n",
	      s, opener->u.open.pathname, s->count, (long long)s->off, caller,
	      (long long)s->old_len, fd);
	if (pwrite(fd, s->contents, s->count, s->off) != s->count) {
		fwarn("failtest: write failed cleaning up %s for %s!",
		      opener->u.open.pathname, caller);
	}

	/* Writes may have extended the file: trim back to the old length. */
	if (ftruncate(fd, s->old_len) != 0) {
		fwarn("failtest_write: truncate failed cleaning up %s for %s!",
		      opener->u.open.pathname, caller);
	}

	if (restore_offset) {
		trace("Restoring offset of fd %i to %llu\n",
		      fd, (long long)s->off);
		lseek(fd, s->off, SEEK_SET);
	}
}
485
/* We save/restore most things on demand, but always do mmaped files. */
static void save_mmapped_files(void)
{
	struct failtest_call *i;
	trace("Saving mmapped files in child\n");

	/* Walk newest-first so later snapshots restore before earlier ones. */
	tlist_for_each_rev(&history, i, list) {
		struct mmap_call *m = &i->u.mmap;
		struct saved_mmapped_file *s;

		if (i->type != FAILTEST_MMAP)
			continue;

		/* FIXME: We only handle mmapped files where fd is still open. */
		if (m->opener->u.open.closed)
			continue;

		/* NOTE(review): malloc result unchecked — OOM crashes here. */
		s = malloc(sizeof *s);
		s->s = save_contents(m->opener->u.open.pathname,
				     m->fd, m->length, m->offset,
				     "mmapped file before fork");
		s->opener = m->opener;
		s->next = saved_mmapped_files;
		saved_mmapped_files = s;
	}
}
512
513 static void free_mmapped_files(bool restore)
514 {
515         trace("%s mmapped files in child\n",
516               restore ? "Restoring" : "Discarding");
517         while (saved_mmapped_files) {
518                 struct saved_mmapped_file *next = saved_mmapped_files->next;
519                 if (restore)
520                         restore_contents(saved_mmapped_files->opener,
521                                          saved_mmapped_files->s, false,
522                                          "saved mmap");
523                 free(saved_mmapped_files->s);
524                 free(saved_mmapped_files);
525                 saved_mmapped_files = next;
526         }
527 }
528
529 /* Returns a FAILTEST_OPEN, FAILTEST_PIPE or NULL. */
530 static struct failtest_call *opener_of(int fd)
531 {
532         struct failtest_call *i;
533
534         /* Don't get confused and match genuinely failed opens. */
535         if (fd < 0)
536                 return NULL;
537
538         /* Figure out the set of live fds. */
539         tlist_for_each_rev(&history, i, list) {
540                 if (i->fail)
541                         continue;
542                 switch (i->type) {
543                 case FAILTEST_CLOSE:
544                         if (i->u.close.fd == fd) {
545                                 return NULL;
546                         }
547                         break;
548                 case FAILTEST_OPEN:
549                         if (i->u.open.ret == fd) {
550                                 if (i->u.open.closed)
551                                         return NULL;
552                                 return i;
553                         }
554                         break;
555                 case FAILTEST_PIPE:
556                         if (i->u.pipe.fds[0] == fd || i->u.pipe.fds[1] == fd) {
557                                 return i;
558                         }
559                         break;
560                 default:
561                         break;
562                 }
563         }
564
565         /* FIXME: socket, dup, etc are untracked! */
566         return NULL;
567 }
568
569 static void free_call(struct failtest_call *call)
570 {
571         /* We don't do this in cleanup: needed even for failed opens. */
572         if (call->type == FAILTEST_OPEN)
573                 free((char *)call->u.open.pathname);
574         free(call->backtrace);
575         tlist_del_from(&history, call, list);
576         free(call);
577 }
578
579 /* Free up memory, so valgrind doesn't report leaks. */
580 static void free_everything(void)
581 {
582         struct failtest_call *i;
583
584         while ((i = tlist_top(&history, list)) != NULL)
585                 free_call(i);
586
587         failtable_clear(&failtable);
588 }
589
/* Final teardown: undo recorded calls in reverse order, report any
 * leaks, restore files for our parent, tell it our result, and exit.
 * @forced_cleanup: true when aborting early (leaks then not reported). */
static NORETURN void failtest_cleanup(bool forced_cleanup, int status)
{
	struct failtest_call *i;
	bool restore = true;

	/* For children, we don't care if they "failed" the testing. */
	if (control_fd != -1)
		status = 0;
	else
		/* We don't restore contents for original parent. */
		restore = false;

	/* Cleanup everything, in reverse order. */
	tlist_for_each_rev(&history, i, list) {
		/* Don't restore things our parent did. */
		if (i == our_history_start)
			restore = false;

		/* Injected failures produced nothing to clean up. */
		if (i->fail)
			continue;

		if (i->cleanup)
			i->cleanup(&i->u, restore);

		/* But their program shouldn't leak, even on failure. */
		if (!forced_cleanup && i->can_leak) {
			/* NOTE(review): failpath_string() result leaks here;
			 * harmless since we exit below. */
			printf("Leak at %s:%u: --failpath=%s\n",
			       i->file, i->line, failpath_string());
			status = 1;
		}
	}

	/* Put back mmaped files the way our parent (if any) expects. */
	free_mmapped_files(true);

	free_everything();
	if (status == 0)
		tell_parent(SUCCESS);
	else
		tell_parent(FAILURE);
	exit(status);
}
632
633 static bool following_path(void)
634 {
635         if (!failpath)
636                 return false;
637         /* + means continue after end, like normal. */
638         if (*failpath == '+') {
639                 failpath = NULL;
640                 return false;
641         }
642         return true;
643 }
644
645 static bool follow_path(struct failtest_call *call)
646 {
647         if (*failpath == '\0') {
648                 /* Continue, but don't inject errors. */
649                 return call->fail = false;
650         }
651
652         if (tolower((unsigned char)*failpath) != info_to_arg[call->type])
653                 errx(1, "Failpath expected '%s' got '%c'\n",
654                      failpath, info_to_arg[call->type]);
655         call->fail = cisupper(*(failpath++));
656                         if (call->fail)
657                                 call->can_leak = false;
658         return call->fail;
659 }
660
/* The heart of failtest: decide whether CALL should fail.
 * If yes, fork: the child returns true (it fails the call, runs on, and
 * eventually reports back); the parent collects the child's output and
 * control messages, waits for it, then returns false to continue the
 * success path.  Returns false without forking when failure is
 * suppressed (path replay, probing, duplicates, or the user hook). */
static bool should_fail(struct failtest_call *call)
{
	int status;
	int control[2], output[2];
	enum info_type type = UNEXPECTED;
	char *out = NULL;
	size_t outlen = 0;
	struct failtest_call *dup;

	/* Suppressed calls (NULL file) never fail. */
	if (call == &unrecorded_call)
		return false;

	if (following_path())
		return follow_path(call);

	/* Attach debugger if they asked for it. */
	if (debugpath) {
		char *path;

		/* Pretend this last call matches whatever path wanted:
		 * keeps valgrind happy. */
		call->fail = cisupper(debugpath[strlen(debugpath)-1]);
		path = failpath_string();

		if (streq(path, debugpath)) {
			char str[80];

			/* Don't timeout. */
			signal(SIGUSR1, SIG_IGN);
			sprintf(str, "xterm -e gdb /proc/%d/exe %d &",
				getpid(), getpid());
			if (system(str) == 0)
				sleep(5);
		} else {
			/* Ignore last character: could be upper or lower. */
			path[strlen(path)-1] = '\0';
			if (!strstarts(debugpath, path)) {
				fprintf(stderr,
					"--debugpath not followed: %s\n", path);
				debugpath = NULL;
			}
		}
		free(path);
	}

	/* Are we probing?  If so, we never fail twice. */
	if (probing) {
		trace("Not failing %c due to FAIL_PROBE return\n",
		      info_to_arg[call->type]);
		return call->fail = false;
	}

	/* Don't fail more than once in the same place. */
	dup = failtable_get(&failtable, call);
	if (dup) {
		trace("Not failing %c due to duplicate\n",
		      info_to_arg[call->type]);
		return call->fail = false;
	}

	/* Let the user's hook veto or modify the failure. */
	if (failtest_hook) {
		switch (failtest_hook(&history)) {
		case FAIL_OK:
			break;
		case FAIL_PROBE:
			probing = true;
			break;
		case FAIL_DONT_FAIL:
			trace("Not failing %c due to failhook return\n",
			      info_to_arg[call->type]);
			call->fail = false;
			return false;
		default:
			abort();
		}
	}

	/* Add it to our table of calls. */
	failtable_add(&failtable, call);

	/* We're going to fail in the child. */
	call->fail = true;
	if (pipe(control) != 0 || pipe(output) != 0)
		err(1, "opening pipe");

	/* Move out the way, to high fds. */
	control[0] = move_fd_to_high(control[0]);
	control[1] = move_fd_to_high(control[1]);
	output[0] = move_fd_to_high(output[0]);
	output[1] = move_fd_to_high(output[1]);

	/* Prevent double-printing (in child and parent) */
	fflush(stdout);
	fflush(warnf);
	if (tracef)
		fflush(tracef);
	child = fork();
	if (child == -1)
		err(1, "forking failed");

	if (child == 0) {
		/* Child: log the fork, wire up pipes, snapshot state. */
		traceindent++;
		if (tracef) {
			struct timeval diff;
			const char *p;
			char *failpath;
			struct failtest_call *c;

			c = tlist_tail(&history, list);
			diff = time_sub(time_now(), start);
			failpath = failpath_string();
			p = strrchr(c->file, '/');
			if (p)
				p++;
			else
				p = c->file;
			trace("%u->%u (%u.%02u): %s (%s:%u)\n",
			      getppid(), getpid(),
			      (int)diff.tv_sec, (int)diff.tv_usec / 10000,
			      failpath, p, c->line);
			free(failpath);
		}
		/* From here on, we have to clean up! */
		our_history_start = tlist_tail(&history, list);
		close(control[0]);
		close(output[0]);
		/* Don't swallow stderr if we're tracing. */
		if (!tracef) {
			dup2(output[1], STDOUT_FILENO);
			dup2(output[1], STDERR_FILENO);
			if (output[1] != STDOUT_FILENO
			    && output[1] != STDERR_FILENO)
				close(output[1]);
		}
		control_fd = move_fd_to_high(control[1]);

		/* Forget any of our parent's saved files. */
		free_mmapped_files(false);

		/* Now, save any files we need to. */
		save_mmapped_files();

		/* Failed calls can't leak. */
		call->can_leak = false;

		return true;
	}

	/* Parent: forward timeout signals to the child. */
	signal(SIGUSR1, hand_down);

	close(control[1]);
	close(output[1]);

	/* We grab output so we can display it; we grab writes so we
	 * can compare. */
	do {
		struct pollfd pfd[2];
		int ret;

		pfd[0].fd = output[0];
		pfd[0].events = POLLIN|POLLHUP;
		pfd[1].fd = control[0];
		pfd[1].events = POLLIN|POLLHUP;

		/* After SUCCESS the control pipe is done: poll output only. */
		if (type == SUCCESS)
			ret = poll(pfd, 1, failtest_timeout_ms);
		else
			ret = poll(pfd, 2, failtest_timeout_ms);

		/* Timeout: signal the child (handler forwards SIGUSR1). */
		if (ret == 0)
			hand_down(SIGUSR1);
		if (ret < 0) {
			if (errno == EINTR)
				continue;
			err(1, "Poll returned %i", ret);
		}

		if (pfd[0].revents & POLLIN) {
			ssize_t len;

			/* NOTE(review): realloc and read() results are
			 * unchecked; a read error (-1) would corrupt
			 * outlen — confirm this can't happen here. */
			out = realloc(out, outlen + 8192);
			len = read(output[0], out + outlen, 8192);
			outlen += len;
		} else if (type != SUCCESS && (pfd[1].revents & POLLIN)) {
			if (read_all(control[0], &type, sizeof(type))) {
				if (type == WRITE) {
					if (!read_write_info(control[0]))
						break;
				} else if (type == RELEASE_LOCKS) {
					release_locks();
					/* FIXME: Tell them we're done... */
				}
			}
		} else if (pfd[0].revents & POLLHUP) {
			break;
		}
	} while (type != FAILURE);

	close(output[0]);
	close(control[0]);
	waitpid(child, &status, 0);
	if (!WIFEXITED(status)) {
		if (WTERMSIG(status) == SIGUSR1)
			child_fail(out, outlen, "Timed out");
		else
			child_fail(out, outlen, "Killed by signal %u: ",
				   WTERMSIG(status));
	}
	/* Child printed failure already, just pass up exit code. */
	if (type == FAILURE) {
		fprintf(stderr, "%.*s", (int)outlen, out);
		tell_parent(type);
		exit(WEXITSTATUS(status) ? WEXITSTATUS(status) : 1);
	}
	if (WEXITSTATUS(status) != 0)
		child_fail(out, outlen, "Exited with status %i: ",
			   WEXITSTATUS(status));

	free(out);
	signal(SIGUSR1, SIG_DFL);

	/* Only child does probe. */
	probing = false;

	/* We continue onwards without failing. */
	call->fail = false;
	return false;
}
889
890 static void cleanup_calloc(struct calloc_call *call, bool restore)
891 {
892         trace("undoing calloc %p\n", call->ret);
893         free(call->ret);
894 }
895
896 void *failtest_calloc(size_t nmemb, size_t size,
897                       const char *file, unsigned line)
898 {
899         struct failtest_call *p;
900         struct calloc_call call;
901         call.nmemb = nmemb;
902         call.size = size;
903         p = add_history(FAILTEST_CALLOC, true, file, line, &call);
904
905         if (should_fail(p)) {
906                 p->u.calloc.ret = NULL;
907                 p->error = ENOMEM;
908         } else {
909                 p->u.calloc.ret = calloc(nmemb, size);
910                 set_cleanup(p, cleanup_calloc, struct calloc_call);
911         }
912         trace("calloc %zu x %zu %s:%u -> %p\n",
913               nmemb, size, file, line, p->u.calloc.ret);
914         errno = p->error;
915         return p->u.calloc.ret;
916 }
917
/* Undo a successful malloc recorded in history: release the block.
 * @restore is ignored; the memory must be freed on every unwind. */
static void cleanup_malloc(struct malloc_call *call, bool restore)
{
        trace("undoing malloc %p\n", call->ret);
        free(call->ret);
}
923
924 void *failtest_malloc(size_t size, const char *file, unsigned line)
925 {
926         struct failtest_call *p;
927         struct malloc_call call;
928         call.size = size;
929
930         p = add_history(FAILTEST_MALLOC, true, file, line, &call);
931         if (should_fail(p)) {
932                 p->u.malloc.ret = NULL;
933                 p->error = ENOMEM;
934         } else {
935                 p->u.malloc.ret = malloc(size);
936                 set_cleanup(p, cleanup_malloc, struct malloc_call);
937         }
938         trace("malloc %zu %s:%u -> %p\n",
939               size, file, line, p->u.malloc.ret);
940         errno = p->error;
941         return p->u.malloc.ret;
942 }
943
/* Undo a successful realloc recorded in history: release the block.
 * @restore is ignored; the memory must be freed on every unwind. */
static void cleanup_realloc(struct realloc_call *call, bool restore)
{
        trace("undoing realloc %p\n", call->ret);
        free(call->ret);
}
949
950 /* Walk back and find out if we got this ptr from a previous routine. */
951 static void fixup_ptr_history(void *ptr, const char *why)
952 {
953         struct failtest_call *i;
954
955         /* Start at end of history, work back. */
956         tlist_for_each_rev(&history, i, list) {
957                 switch (i->type) {
958                 case FAILTEST_REALLOC:
959                         if (i->u.realloc.ret == ptr) {
960                                 trace("found realloc %p %s:%u matching %s\n",
961                                       ptr, i->file, i->line, why);
962                                 i->cleanup = NULL;
963                                 i->can_leak = false;
964                                 return;
965                         }
966                         break;
967                 case FAILTEST_MALLOC:
968                         if (i->u.malloc.ret == ptr) {
969                                 trace("found malloc %p %s:%u matching %s\n",
970                                       ptr, i->file, i->line, why);
971                                 i->cleanup = NULL;
972                                 i->can_leak = false;
973                                 return;
974                         }
975                         break;
976                 case FAILTEST_CALLOC:
977                         if (i->u.calloc.ret == ptr) {
978                                 trace("found calloc %p %s:%u matching %s\n",
979                                       ptr, i->file, i->line, why);
980                                 i->cleanup = NULL;
981                                 i->can_leak = false;
982                                 return;
983                         }
984                         break;
985                 default:
986                         break;
987                 }
988         }
989         trace("Did not find %p matching %s\n", ptr, why);
990 }
991
992 void *failtest_realloc(void *ptr, size_t size, const char *file, unsigned line)
993 {
994         struct failtest_call *p;
995         struct realloc_call call;
996         call.size = size;
997         p = add_history(FAILTEST_REALLOC, true, file, line, &call);
998
999         /* FIXME: Try one child moving allocation, one not. */
1000         if (should_fail(p)) {
1001                 p->u.realloc.ret = NULL;
1002                 p->error = ENOMEM;
1003         } else {
1004                 /* Don't catch this one in the history fixup... */
1005                 p->u.realloc.ret = NULL;
1006                 fixup_ptr_history(ptr, "realloc");
1007                 p->u.realloc.ret = realloc(ptr, size);
1008                 set_cleanup(p, cleanup_realloc, struct realloc_call);
1009         }
1010         trace("realloc %p %s:%u -> %p\n",
1011               ptr, file, line, p->u.realloc.ret);
1012         errno = p->error;
1013         return p->u.realloc.ret;
1014 }
1015
/* FIXME: Record free, so we can terminate fixup_ptr_history correctly.
 * If there's an alloc we don't see, it could get confusing if it matches
 * a previous allocation we did see. */
/* free() wrapper: detach the pointer from history (so unwinding won't
 * double-free it, and it isn't reported as a leak), then really free. */
void failtest_free(void *ptr)
{
        fixup_ptr_history(ptr, "free");
        trace("free %p\n", ptr);
        free(ptr);
}
1025
1026
/* Snapshot an entire file's contents (used before open(O_TRUNC)
 * destroys them).  Returns NULL if the file can't be opened. */
static struct contents_saved *save_file(const char *pathname)
{
        struct contents_saved *saved = NULL;
        int fd = open(pathname, O_RDONLY);

        if (fd >= 0) {
                saved = save_contents(pathname, fd, lseek(fd, 0, SEEK_END), 0,
                                      "open with O_TRUNC");
                close(fd);
        }
        return saved;
}
1041
/* Optimization: don't create a child for an open which *we know*
 * would fail anyway. */
static bool open_would_fail(const char *pathname, int flags)
{
        const int accmode = flags & O_ACCMODE;

        /* Reading needs read permission, O_CREAT or not. */
        if (accmode == O_RDONLY)
                return access(pathname, R_OK) != 0;

        /* With O_CREAT, a missing/unwritable file may still be created. */
        /* FIXME: We could check if it exists, for O_CREAT|O_EXCL */
        if (flags & O_CREAT)
                return false;

        if (accmode == O_WRONLY)
                return access(pathname, W_OK) != 0;
        if (accmode == O_RDWR)
                return access(pathname, W_OK) != 0
                        || access(pathname, R_OK) != 0;

        return false;
}
1058
/* Undo an open(): restore O_TRUNC-clobbered contents if requested,
 * close the fd if the test hasn't already, and drop the snapshot. */
static void cleanup_open(struct open_call *call, bool restore)
{
        if (restore && call->saved)
                restore_contents(container_of(call, struct failtest_call,
                                              u.open),
                                 call->saved, false, "open with O_TRUNC");
        /* Close the fd unless the test code closed it itself. */
        if (!call->closed) {
                trace("Cleaning up open %s by closing fd %i\n",
                      call->pathname, call->ret);
                close(call->ret);
                call->closed = true;
        }
        free(call->saved);
}
1073
1074 int failtest_open(const char *pathname,
1075                   const char *file, unsigned line, ...)
1076 {
1077         struct failtest_call *p;
1078         struct open_call call;
1079         va_list ap;
1080
1081         call.pathname = strdup(pathname);
1082         va_start(ap, line);
1083         call.flags = va_arg(ap, int);
1084         call.always_save = false;
1085         call.closed = false;
1086         if (call.flags & O_CREAT) {
1087                 call.mode = va_arg(ap, int);
1088                 va_end(ap);
1089         }
1090         p = add_history(FAILTEST_OPEN, true, file, line, &call);
1091         /* Avoid memory leak! */
1092         if (p == &unrecorded_call)
1093                 free((char *)call.pathname);
1094
1095         if (should_fail(p)) {
1096                 /* Don't bother inserting failures that would happen anyway. */
1097                 if (open_would_fail(pathname, call.flags)) {
1098                         trace("Open would have failed anyway: stopping\n");
1099                         failtest_cleanup(true, 0);
1100                 }
1101                 p->u.open.ret = -1;
1102                 /* FIXME: Play with error codes? */
1103                 p->error = EACCES;
1104         } else {
1105                 /* Save the old version if they're truncating it. */
1106                 if (call.flags & O_TRUNC)
1107                         p->u.open.saved = save_file(pathname);
1108                 else
1109                         p->u.open.saved = NULL;
1110                 p->u.open.ret = open(pathname, call.flags, call.mode);
1111                 if (p->u.open.ret == -1) {
1112                         p->u.open.closed = true;
1113                         p->can_leak = false;
1114                 } else {
1115                         set_cleanup(p, cleanup_open, struct open_call);
1116                 }
1117         }
1118         trace("open %s %s:%u -> %i (opener %p)\n",
1119               pathname, file, line, p->u.open.ret, &p->u.open);
1120         errno = p->error;
1121         return p->u.open.ret;
1122 }
1123
1124 static void cleanup_mmap(struct mmap_call *mmap, bool restore)
1125 {
1126         trace("cleaning up mmap @%p (opener %p)\n",
1127               mmap->ret, mmap->opener);
1128         if (restore)
1129                 restore_contents(mmap->opener, mmap->saved, false, "mmap");
1130         free(mmap->saved);
1131 }
1132
1133 void *failtest_mmap(void *addr, size_t length, int prot, int flags,
1134                     int fd, off_t offset, const char *file, unsigned line)
1135 {
1136         struct failtest_call *p;
1137         struct mmap_call call;
1138
1139         call.addr = addr;
1140         call.length = length;
1141         call.prot = prot;
1142         call.flags = flags;
1143         call.offset = offset;
1144         call.fd = fd;
1145         call.opener = opener_of(fd);
1146
1147         /* If we don't know what file it was, don't fail. */
1148         if (!call.opener) {
1149                 if (fd != -1) {
1150                         fwarnx("failtest_mmap: couldn't figure out source for"
1151                                " fd %i at %s:%u", fd, file, line);
1152                 }
1153                 addr = mmap(addr, length, prot, flags, fd, offset);
1154                 trace("mmap of fd %i -> %p (opener = NULL)\n", fd, addr);
1155                 return addr;
1156         }
1157
1158         p = add_history(FAILTEST_MMAP, false, file, line, &call);
1159         if (should_fail(p)) {
1160                 p->u.mmap.ret = MAP_FAILED;
1161                 p->error = ENOMEM;
1162         } else {
1163                 p->u.mmap.ret = mmap(addr, length, prot, flags, fd, offset);
1164                 /* Save contents if we're writing to a normal file */
1165                 if (p->u.mmap.ret != MAP_FAILED
1166                     && (prot & PROT_WRITE)
1167                     && call.opener->type == FAILTEST_OPEN) {
1168                         const char *fname = call.opener->u.open.pathname;
1169                         p->u.mmap.saved = save_contents(fname, fd, length,
1170                                                         offset, "being mmapped");
1171                         set_cleanup(p, cleanup_mmap, struct mmap_call);
1172                 }
1173         }
1174         trace("mmap of fd %i %s:%u -> %p (opener = %p)\n",
1175               fd, file, line, addr, call.opener);
1176         errno = p->error;
1177         return p->u.mmap.ret;
1178 }
1179
1180 static void cleanup_pipe(struct pipe_call *call, bool restore)
1181 {
1182         trace("cleaning up pipe fd=%i%s,%i%s\n",
1183               call->fds[0], call->closed[0] ? "(already closed)" : "",
1184               call->fds[1], call->closed[1] ? "(already closed)" : "");
1185         if (!call->closed[0])
1186                 close(call->fds[0]);
1187         if (!call->closed[1])
1188                 close(call->fds[1]);
1189 }
1190
1191 int failtest_pipe(int pipefd[2], const char *file, unsigned line)
1192 {
1193         struct failtest_call *p;
1194         struct pipe_call call;
1195
1196         p = add_history(FAILTEST_PIPE, true, file, line, &call);
1197         if (should_fail(p)) {
1198                 p->u.open.ret = -1;
1199                 /* FIXME: Play with error codes? */
1200                 p->error = EMFILE;
1201         } else {
1202                 p->u.pipe.ret = pipe(p->u.pipe.fds);
1203                 p->u.pipe.closed[0] = p->u.pipe.closed[1] = false;
1204                 set_cleanup(p, cleanup_pipe, struct pipe_call);
1205         }
1206
1207         trace("pipe %s:%u -> %i,%i\n", file, line,
1208               p->u.pipe.ret ? -1 : p->u.pipe.fds[0],
1209               p->u.pipe.ret ? -1 : p->u.pipe.fds[1]);
1210
1211         /* This causes valgrind to notice if they use pipefd[] after failure */
1212         memcpy(pipefd, p->u.pipe.fds, sizeof(p->u.pipe.fds));
1213         errno = p->error;
1214         return p->u.pipe.ret;
1215 }
1216
/* Undo a plain read(): seek back to where the offset was beforehand.
 * Only installed for read(), not pread() (which doesn't move it). */
static void cleanup_read(struct read_call *call, bool restore)
{
        if (restore) {
                trace("cleaning up read on fd %i: seeking to %llu\n",
                      call->fd, (long long)call->off);

                /* Read (not readv!) moves file offset! */
                if (lseek(call->fd, call->off, SEEK_SET) != call->off) {
                        fwarn("Restoring lseek pointer failed (read)");
                }
        }
}
1229
1230 static ssize_t failtest_add_read(int fd, void *buf, size_t count, off_t off,
1231                                  bool is_pread, const char *file, unsigned line)
1232 {
1233         struct failtest_call *p;
1234         struct read_call call;
1235         call.fd = fd;
1236         call.buf = buf;
1237         call.count = count;
1238         call.off = off;
1239         p = add_history(FAILTEST_READ, false, file, line, &call);
1240
1241         /* FIXME: Try partial read returns. */
1242         if (should_fail(p)) {
1243                 p->u.read.ret = -1;
1244                 p->error = EIO;
1245         } else {
1246                 if (is_pread)
1247                         p->u.read.ret = pread(fd, buf, count, off);
1248                 else {
1249                         p->u.read.ret = read(fd, buf, count);
1250                         if (p->u.read.ret != -1)
1251                                 set_cleanup(p, cleanup_read, struct read_call);
1252                 }
1253         }
1254         trace("%sread %s:%u fd %i %zu@%llu -> %i\n",
1255               is_pread ? "p" : "", file, line, fd, count, (long long)off,
1256               p->u.read.ret);
1257         errno = p->error;
1258         return p->u.read.ret;
1259 }
1260
/* Undo a write to a regular file: put the overwritten bytes back and
 * drop the snapshot.  Only installed when the fd's opener is a
 * FAILTEST_OPEN, so write->opener is non-NULL here.  For plain write()
 * (not pwrite) the restore also moves the seek pointer back. */
static void cleanup_write(struct write_call *write, bool restore)
{
        trace("cleaning up write on %s\n", write->opener->u.open.pathname);
        if (restore)
                restore_contents(write->opener, write->saved, !write->is_pwrite,
                                 "write");
        free(write->saved);
}
1269
/* Common implementation for write()/pwrite() wrappers: may inject a
 * -1/EIO failure.  Writes to files we opened are snapshotted so they
 * can be undone; writes to other fds (pipes, sockets) can't be unwound,
 * so parent and child must write identical data in the same order.
 * @off: explicit offset for pwrite(), current seek pointer for write()
 *       ((off_t)-1 when the fd isn't seekable). */
static ssize_t failtest_add_write(int fd, const void *buf,
                                  size_t count, off_t off,
                                  bool is_pwrite,
                                  const char *file, unsigned line)
{
        struct failtest_call *p;
        struct write_call call;

        call.fd = fd;
        call.buf = buf;
        call.count = count;
        call.off = off;
        call.is_pwrite = is_pwrite;
        call.opener = opener_of(fd);
        p = add_history(FAILTEST_WRITE, false, file, line, &call);

        /* If we're a child, we need to make sure we write the same thing
         * to non-files as the parent does, so tell it. */
        if (control_fd != -1 && off == (off_t)-1) {
                enum info_type type = WRITE;

                write_all(control_fd, &type, sizeof(type));
                write_all(control_fd, &p->u.write, sizeof(p->u.write));
                write_all(control_fd, buf, count);
        }

        /* FIXME: Try partial write returns. */
        if (should_fail(p)) {
                p->u.write.ret = -1;
                p->error = EIO;
        } else {
                bool is_file;
                assert(call.opener == p->u.write.opener);

                if (p->u.write.opener) {
                        is_file = (p->u.write.opener->type == FAILTEST_OPEN);
                } else {
                        /* We can't unwind it, so at least check same
                         * in parent and child. */
                        is_file = false;
                }

                /* FIXME: We assume same write order in parent and child */
                if (!is_file && child_writes_num != 0) {
                        /* Cross-check this write against what the child
                         * reported, then consume that record. */
                        if (child_writes[0].fd != fd)
                                errx(1, "Child wrote to fd %u, not %u?",
                                     child_writes[0].fd, fd);
                        if (child_writes[0].off != p->u.write.off)
                                errx(1, "Child wrote to offset %zu, not %zu?",
                                     (size_t)child_writes[0].off,
                                     (size_t)p->u.write.off);
                        if (child_writes[0].count != count)
                                errx(1, "Child wrote length %zu, not %zu?",
                                     child_writes[0].count, count);
                        if (memcmp(child_writes[0].buf, buf, count)) {
                                child_fail(NULL, 0,
                                           "Child wrote differently to"
                                           " fd %u than we did!\n", fd);
                        }
                        free((char *)child_writes[0].buf);
                        child_writes_num--;
                        memmove(&child_writes[0], &child_writes[1],
                                sizeof(child_writes[0]) * child_writes_num);

                        /* Child wrote it already. */
                        trace("write %s:%i on fd %i already done by child\n",
                              file, line, fd);
                        p->u.write.ret = count;
                        errno = p->error;
                        return p->u.write.ret;
                }

                if (is_file) {
                        p->u.write.saved = save_contents(call.opener->u.open.pathname,
                                                         fd, count, off,
                                                         "being overwritten");
                        set_cleanup(p, cleanup_write, struct write_call);
                }

                /* Though off is current seek ptr for write case, we need to
                 * move it.  write() does that for us. */
                if (p->u.write.is_pwrite)
                        p->u.write.ret = pwrite(fd, buf, count, off);
                else
                        p->u.write.ret = write(fd, buf, count);
        }
        trace("%swrite %s:%i %zu@%llu on fd %i -> %i\n",
              p->u.write.is_pwrite ? "p" : "",
              file, line, count, (long long)off, fd, p->u.write.ret);
        errno = p->error;
        return p->u.write.ret;
}
1362
/* pwrite() wrapper: see failtest_add_write (explicit offset). */
ssize_t failtest_pwrite(int fd, const void *buf, size_t count, off_t offset,
                        const char *file, unsigned line)
{
        return failtest_add_write(fd, buf, count, offset, true, file, line);
}
1368
/* write() wrapper: record the current seek pointer so the write can be
 * unwound.  (lseek returns (off_t)-1 for non-seekable fds like pipes;
 * failtest_add_write uses that to detect them.) */
ssize_t failtest_write(int fd, const void *buf, size_t count,
                       const char *file, unsigned line)
{
        return failtest_add_write(fd, buf, count, lseek(fd, 0, SEEK_CUR), false,
                                  file, line);
}
1375
/* pread() wrapper: see failtest_add_read (explicit offset). */
ssize_t failtest_pread(int fd, void *buf, size_t count, off_t off,
                       const char *file, unsigned line)
{
        return failtest_add_read(fd, buf, count, off, true, file, line);
}
1381
/* read() wrapper: record the current seek pointer so the offset change
 * can be unwound by cleanup_read. */
ssize_t failtest_read(int fd, void *buf, size_t count,
                      const char *file, unsigned line)
{
        return failtest_add_read(fd, buf, count, lseek(fd, 0, SEEK_CUR), false,
                                 file, line);
}
1388
/* Merge a lock/unlock of [start,end] on fd into the lock table.
 * Existing ranges on fd that overlap the new range are trimmed, split
 * or removed first; then, unless @type is F_UNLCK, the new range is
 * appended.  Returns the (possibly realloc'd) array; the global
 * lock_num is updated as a side effect. */
static struct lock_info *WARN_UNUSED_RESULT
add_lock(struct lock_info *locks, int fd, off_t start, off_t end, int type)
{
        unsigned int i;
        struct lock_info *l;

        for (i = 0; i < lock_num; i++) {
                l = &locks[i];

                if (l->fd != fd)
                        continue;
                /* Four cases we care about:
                 * Start overlap:
                 *      l =    |      |
                 *      new = |   |
                 * Mid overlap:
                 *      l =    |      |
                 *      new =    |  |
                 * End overlap:
                 *      l =    |      |
                 *      new =      |    |
                 * Total overlap:
                 *      l =    |      |
                 *      new = |         |
                 */
                if (start > l->start && end < l->end) {
                        /* Mid overlap: trim entry, add new one. */
                        off_t new_start, new_end;
                        new_start = end + 1;
                        new_end = l->end;
                        trace("splitting lock on fd %i from %llu-%llu"
                              " to %llu-%llu\n",
                              fd, (long long)l->start, (long long)l->end,
                              (long long)l->start, (long long)start - 1);
                        l->end = start - 1;
                        locks = add_lock(locks,
                                         fd, new_start, new_end, l->type);
                        /* Recursion may realloc the array: re-derive l. */
                        l = &locks[i];
                } else if (start <= l->start && end >= l->end) {
                        /* Total overlap: eliminate entry. */
                        trace("erasing lock on fd %i %llu-%llu\n",
                              fd, (long long)l->start, (long long)l->end);
                        /* Mark empty (end < start) so it's removed below. */
                        l->end = 0;
                        l->start = 1;
                } else if (end >= l->start && end < l->end) {
                        trace("trimming lock on fd %i from %llu-%llu"
                              " to %llu-%llu\n",
                              fd, (long long)l->start, (long long)l->end,
                              (long long)end + 1, (long long)l->end);
                        /* Start overlap: trim entry. */
                        l->start = end + 1;
                } else if (start > l->start && start <= l->end) {
                        trace("trimming lock on fd %i from %llu-%llu"
                              " to %llu-%llu\n",
                              fd, (long long)l->start, (long long)l->end,
                              (long long)l->start, (long long)start - 1);
                        /* End overlap: trim entry. */
                        l->end = start-1;
                }
                /* Nothing left?  Remove it. */
                if (l->end < l->start) {
                        trace("forgetting lock on fd %i\n", fd);
                        memmove(l, l + 1, (--lock_num - i) * sizeof(l[0]));
                        i--;
                }
        }

        if (type != F_UNLCK) {
                /* A real lock (F_RDLCK/F_WRLCK): append the new range. */
                locks = realloc(locks, (lock_num + 1) * sizeof(*locks));
                l = &locks[lock_num++];
                l->fd = fd;
                l->start = start;
                l->end = end;
                l->type = type;
                trace("new lock on fd %i %llu-%llu\n",
                      fd, (long long)l->start, (long long)l->end);
        }
        return locks;
}
1468
/* We trap this so we can record it: we don't fail it. */
/* close() wrapper: marks the fd's opener (open or pipe) as closed so
 * it isn't flagged as a leak, undoes any offset changes this fd
 * accumulated since our history start (must happen before the real
 * close), and drops any locks held on the fd. */
int failtest_close(int fd, const char *file, unsigned line)
{
        struct close_call call;
        struct failtest_call *p, *opener;

        /* Do this before we add ourselves to history! */
        opener = opener_of(fd);

        call.fd = fd;
        p = add_history(FAILTEST_CLOSE, false, file, line, &call);
        p->fail = false;

        /* Consume close from failpath (shouldn't tell us to fail). */
        if (following_path()) {
                if (follow_path(p))
                        abort();
        }

        trace("close on fd %i\n", fd);
        if (fd < 0)
                return close(fd);

        /* Mark opener as not leaking, remove its cleanup function. */
        if (opener) {
                trace("close on fd %i found opener %p\n", fd, opener);
                if (opener->type == FAILTEST_PIPE) {
                        /* From a pipe? */
                        if (opener->u.pipe.fds[0] == fd) {
                                assert(!opener->u.pipe.closed[0]);
                                opener->u.pipe.closed[0] = true;
                        } else if (opener->u.pipe.fds[1] == fd) {
                                assert(!opener->u.pipe.closed[1]);
                                opener->u.pipe.closed[1] = true;
                        } else
                                abort();
                        /* A pipe can still leak if only one end is closed. */
                        opener->can_leak = (!opener->u.pipe.closed[0]
                                            || !opener->u.pipe.closed[1]);
                } else if (opener->type == FAILTEST_OPEN) {
                        opener->u.open.closed = true;
                        opener->can_leak = false;
                } else
                        abort();
        }

        /* Restore offset now, in case parent shared (can't do after close!). */
        if (control_fd != -1) {
                struct failtest_call *i;

                /* Walk back only over our own (child) history entries,
                 * stopping at whichever call opened this fd. */
                tlist_for_each_rev(&history, i, list) {
                        if (i == our_history_start)
                                break;
                        if (i == opener)
                                break;
                        if (i->type == FAILTEST_LSEEK && i->u.lseek.fd == fd) {
                                trace("close on fd %i undoes lseek\n", fd);
                                /* This seeks back. */
                                i->cleanup(&i->u, true);
                                i->cleanup = NULL;
                        } else if (i->type == FAILTEST_WRITE
                                   && i->u.write.fd == fd
                                   && !i->u.write.is_pwrite) {
                                trace("close on fd %i undoes write"
                                      " offset change\n", fd);
                                /* Write (not pwrite!) moves file offset! */
                                if (lseek(fd, i->u.write.off, SEEK_SET)
                                    != i->u.write.off) {
                                        fwarn("Restoring lseek pointer failed (write)");
                                }
                        } else if (i->type == FAILTEST_READ
                                   && i->u.read.fd == fd) {
                                /* preads don't *have* cleanups */
                                if (i->cleanup) {
                                        trace("close on fd %i undoes read"
                                              " offset change\n", fd);
                                        /* This seeks back. */
                                        i->cleanup(&i->u, true);
                                        i->cleanup = NULL;
                                }
                        }
                }
        }

        /* Close unlocks everything. */
        locks = add_lock(locks, fd, 0, off_max(), F_UNLCK);
        return close(fd);
}
1556
/* Zero length means "to end of file" */
static off_t end_of(off_t start, off_t len)
{
        /* Inclusive end of [start, start+len), or off_max() for len 0. */
        return len ? start + len - 1 : off_max();
}
1564
/* FIXME: This only handles locks, really. */
/* fcntl() wrapper: F_SETFL/F_SETFD/F_GETFL/F_GETFD/F_GETLK pass
 * straight through; only F_SETLK/F_SETLKW reach the failure machinery
 * (failing with EAGAIN/EDEADLK respectively).  Successful locks are
 * recorded in the global lock table via add_lock(). */
int failtest_fcntl(int fd, const char *file, unsigned line, int cmd, ...)
{
        struct failtest_call *p;
        struct fcntl_call call;
        va_list ap;

        call.fd = fd;
        call.cmd = cmd;

        /* Argument extraction. */
        switch (cmd) {
        case F_SETFL:
        case F_SETFD:
                va_start(ap, cmd);
                call.arg.l = va_arg(ap, long);
                va_end(ap);
                trace("fcntl on fd %i F_SETFL/F_SETFD\n", fd);
                return fcntl(fd, cmd, call.arg.l);
        case F_GETFD:
        case F_GETFL:
                trace("fcntl on fd %i F_GETFL/F_GETFD\n", fd);
                return fcntl(fd, cmd);
        case F_GETLK:
                /* Refresh our lock table from the parent first. */
                trace("fcntl on fd %i F_GETLK\n", fd);
                get_locks();
                va_start(ap, cmd);
                call.arg.fl = *va_arg(ap, struct flock *);
                va_end(ap);
                return fcntl(fd, cmd, &call.arg.fl);
        case F_SETLK:
        case F_SETLKW:
                trace("fcntl on fd %i F_SETLK%s\n",
                      fd, cmd == F_SETLKW ? "W" : "");
                va_start(ap, cmd);
                call.arg.fl = *va_arg(ap, struct flock *);
                va_end(ap);
                break;
        default:
                /* This means you need to implement it here. */
                /* NOTE(review): err() appends strerror(errno), which is
                 * unrelated here; errx() may have been intended. */
                err(1, "failtest: unknown fcntl %u", cmd);
        }

        p = add_history(FAILTEST_FCNTL, false, file, line, &call);

        if (should_fail(p)) {
                p->u.fcntl.ret = -1;
                if (p->u.fcntl.cmd == F_SETLK)
                        p->error = EAGAIN;
                else
                        p->error = EDEADLK;
        } else {
                get_locks();
                p->u.fcntl.ret = fcntl(p->u.fcntl.fd, p->u.fcntl.cmd,
                                       &p->u.fcntl.arg.fl);
                if (p->u.fcntl.ret == -1)
                        p->error = errno;
                else {
                        /* We don't handle anything else yet. */
                        assert(p->u.fcntl.arg.fl.l_whence == SEEK_SET);
                        locks = add_lock(locks,
                                         p->u.fcntl.fd,
                                         p->u.fcntl.arg.fl.l_start,
                                         end_of(p->u.fcntl.arg.fl.l_start,
                                                p->u.fcntl.arg.fl.l_len),
                                         p->u.fcntl.arg.fl.l_type);
                }
        }
        trace("fcntl on fd %i -> %i\n", fd, p->u.fcntl.ret);
        errno = p->error;
        return p->u.fcntl.ret;
}
1637
/* Undo an lseek: seek back to wherever the fd was beforehand. */
static void cleanup_lseek(struct lseek_call *call, bool restore)
{
        if (restore) {
                trace("cleaning up lseek on fd %i -> %llu\n",
                      call->fd, (long long)call->old_off);
                if (lseek(call->fd, call->old_off, SEEK_SET) != call->old_off)
                        fwarn("Restoring lseek pointer failed");
        }
}
1647
1648 /* We trap this so we can undo it: we don't fail it. */
1649 off_t failtest_lseek(int fd, off_t offset, int whence, const char *file,
1650                      unsigned int line)
1651 {
1652         struct failtest_call *p;
1653         struct lseek_call call;
1654         call.fd = fd;
1655         call.offset = offset;
1656         call.whence = whence;
1657         call.old_off = lseek(fd, 0, SEEK_CUR);
1658
1659         p = add_history(FAILTEST_LSEEK, false, file, line, &call);
1660         p->fail = false;
1661
1662         /* Consume lseek from failpath. */
1663         if (failpath)
1664                 if (should_fail(p))
1665                         abort();
1666
1667         p->u.lseek.ret = lseek(fd, offset, whence);
1668
1669         if (p->u.lseek.ret != (off_t)-1)
1670                 set_cleanup(p, cleanup_lseek, struct lseek_call);
1671
1672         trace("lseek %s:%u on fd %i from %llu to %llu%s\n",
1673               file, line, fd, (long long)call.old_off, (long long)offset,
1674               whence == SEEK_CUR ? " (from current off)" :
1675               whence == SEEK_END ? " (from end)" :
1676               whence == SEEK_SET ? "" : " (invalid whence)");
1677         return p->u.lseek.ret;
1678 }
1679
1680
1681 pid_t failtest_getpid(const char *file, unsigned line)
1682 {
1683         /* You must call failtest_init first! */
1684         assert(orig_pid);
1685         return orig_pid;
1686 }
1687         
1688 void failtest_init(int argc, char *argv[])
1689 {
1690         unsigned int i;
1691
1692         orig_pid = getpid();
1693
1694         warnf = fdopen(move_fd_to_high(dup(STDERR_FILENO)), "w");
1695         for (i = 1; i < argc; i++) {
1696                 if (!strncmp(argv[i], "--failpath=", strlen("--failpath="))) {
1697                         failpath = argv[i] + strlen("--failpath=");
1698                 } else if (strcmp(argv[i], "--trace") == 0) {
1699                         tracef = warnf;
1700                         failtest_timeout_ms = -1;
1701                 } else if (!strncmp(argv[i], "--debugpath=",
1702                                     strlen("--debugpath="))) {
1703                         debugpath = argv[i] + strlen("--debugpath=");
1704                 }
1705         }
1706         failtable_init(&failtable);
1707         start = time_now();
1708 }
1709
1710 bool failtest_has_failed(void)
1711 {
1712         return control_fd != -1;
1713 }
1714
1715 void failtest_exit(int status)
1716 {
1717         trace("failtest_exit with status %i\n", status);
1718         if (failtest_exit_check) {
1719                 if (!failtest_exit_check(&history))
1720                         child_fail(NULL, 0, "failtest_exit_check failed\n");
1721         }
1722
1723         failtest_cleanup(false, status);
1724 }