1 /* Licensed under LGPL - see LICENSE file for details */
2 #include <ccan/failtest/failtest.h>
3 #include <stdarg.h>
4 #include <string.h>
5 #include <stdio.h>
6 #include <fcntl.h>
7 #include <ctype.h>
8 #include <err.h>
9 #include <unistd.h>
10 #include <poll.h>
11 #include <errno.h>
12 #include <sys/types.h>
13 #include <sys/wait.h>
14 #include <sys/stat.h>
15 #include <sys/time.h>
16 #include <sys/mman.h>
17 #include <signal.h>
18 #include <assert.h>
19 #include <ccan/time/time.h>
20 #include <ccan/read_write_all/read_write_all.h>
21 #include <ccan/failtest/failtest_proto.h>
22 #include <ccan/build_assert/build_assert.h>
23 #include <ccan/hash/hash.h>
24 #include <ccan/htable/htable_type.h>
25 #include <ccan/str/str.h>
26 #include <ccan/compiler/compiler.h>
27
28 enum failtest_result (*failtest_hook)(struct tlist_calls *);
29
30 static int tracefd = -1;
31 static int traceindent = 0;
32 static int warnfd;
33
34 unsigned int failtest_timeout_ms = 20000;
35
36 const char *failpath;
37 const char *debugpath;
38
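/*
 * Roughly how a test drives this module (a sketch; the authoritative
 * interface is declared in failtest.h and failtest_override.h): the test
 * includes failtest_override.h so that calls like malloc() and open()
 * become the failtest_* wrappers below, and brackets its code with
 * failtest_init()/failtest_exit().  The failpath and debugpath strings
 * above come from the command line (the "--failpath=" line printed by
 * child_fail() below can be fed straight back in):
 *
 *      #include <ccan/failtest/failtest_override.h>
 *      #include <ccan/failtest/failtest.h>
 *
 *      int main(int argc, char *argv[])
 *      {
 *              failtest_init(argc, argv);
 *              ... code under test, using plain malloc()/open()/write() ...
 *              failtest_exit(0);
 *      }
 */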
39 enum info_type {
40         WRITE,
41         RELEASE_LOCKS,
42         FAILURE,
43         SUCCESS,
44         UNEXPECTED
45 };
46
47 struct lock_info {
48         int fd;
49         /* end is inclusive: you can't have a 0-byte lock. */
50         off_t start, end;
51         int type;
52 };
53
54 /* We hash the call location together with its backtrace. */
55 static size_t hash_call(const struct failtest_call *call)
56 {
57         return hash(call->file, strlen(call->file),
58                     hash(&call->line, 1,
59                          hash(call->backtrace, call->backtrace_num,
60                               call->type)));
61 }
62
63 static bool call_eq(const struct failtest_call *call1,
64                     const struct failtest_call *call2)
65 {
66         unsigned int i;
67
68         if (strcmp(call1->file, call2->file) != 0
69             || call1->line != call2->line
70             || call1->type != call2->type
71             || call1->backtrace_num != call2->backtrace_num)
72                 return false;
73
74         for (i = 0; i < call1->backtrace_num; i++)
75                 if (call1->backtrace[i] != call2->backtrace[i])
76                         return false;
77
78         return true;
79 }
80
81 /* Defines struct failtable. */
82 HTABLE_DEFINE_TYPE(struct failtest_call, (struct failtest_call *), hash_call,
83                    call_eq, failtable);
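/*
 * HTABLE_DEFINE_TYPE() generates the failtable_add(), failtable_get() and
 * failtable_clear() helpers used below, keyed by hash_call() and compared
 * with call_eq(): two calls are considered the same failure site only if
 * they share file, line, call type and backtrace.
 */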
84
85 bool (*failtest_exit_check)(struct tlist_calls *history);
86
87 /* The entire history of all calls. */
88 static struct tlist_calls history = TLIST_INIT(history);
89 /* If we're a child, the fd to write control info to the parent. */
90 static int control_fd = -1;
91 /* If we're a child, this is the first call we did ourselves. */
92 static struct failtest_call *our_history_start = NULL;
93 /* For printing runtime with --trace. */
94 static struct timeval start;
95 /* Set when failtest_hook returns FAIL_PROBE */
96 static bool probing = false;
97 /* Table to track duplicates. */
98 static struct failtable failtable;
99
100 /* Array of writes which our child did.  We report them on failure. */
101 static struct write_call *child_writes = NULL;
102 static unsigned int child_writes_num = 0;
103
104 /* fcntl locking info. */
105 static pid_t lock_owner;
106 static struct lock_info *locks = NULL;
107 static unsigned int lock_num = 0;
108
109 /* Our original pid, which we return to anyone who asks. */
110 static pid_t orig_pid;
111
112 /* Mapping from failtest_type to char. */
113 static const char info_to_arg[] = "mceoxprwfal";
114
115 /* Dummy call used for failtest_undo wrappers. */
116 static struct failtest_call unrecorded_call;
117
118 struct contents_saved {
119         size_t count;
120         off_t off;
121         off_t old_len;
122         char contents[1];
123 };
124
125 /* File contents, saved in this child only. */
126 struct saved_mmapped_file {
127         struct saved_mmapped_file *next;
128         struct failtest_call *opener;
129         struct contents_saved *s;
130 };
131
132 static struct saved_mmapped_file *saved_mmapped_files;
133
134 #if HAVE_BACKTRACE
135 #include <execinfo.h>
136
137 static void **get_backtrace(unsigned int *num)
138 {
139         static unsigned int max_back = 100;
140         void **ret;
141
142 again:
143         ret = malloc(max_back * sizeof(void *));
144         *num = backtrace(ret, max_back);
145         if (*num == max_back) {
146                 free(ret);
147                 max_back *= 2;
148                 goto again;
149         }
150         return ret;
151 }
152 #else
153 /* This will test slightly less, since it will treat all calls from the
154  * same location as identical.  But, it's slightly faster! */
155 static void **get_backtrace(unsigned int *num)
156 {
157         *num = 0;
158         return NULL;
159 }
160 #endif /* HAVE_BACKTRACE */
161
162 static struct failtest_call *add_history_(enum failtest_call_type type,
163                                           bool can_leak,
164                                           const char *file,
165                                           unsigned int line,
166                                           const void *elem,
167                                           size_t elem_size)
168 {
169         struct failtest_call *call;
170
171         /* NULL file is how we suppress failure. */
172         if (!file)
173                 return &unrecorded_call;
174
175         call = malloc(sizeof *call);
176         call->type = type;
177         call->can_leak = can_leak;
178         call->file = file;
179         call->line = line;
180         call->cleanup = NULL;
181         call->backtrace = get_backtrace(&call->backtrace_num);
182         memcpy(&call->u, elem, elem_size);
183         tlist_add_tail(&history, call, list);
184         return call;
185 }
186
187 #define add_history(type, can_leak, file, line, elem)           \
188         add_history_((type), (can_leak), (file), (line), (elem), sizeof(*(elem)))
189
190 /* We do a fake call inside a sizeof(), to check types. */
191 #define set_cleanup(call, clean, type)                  \
192         (call)->cleanup = (void *)((void)sizeof(clean((type *)NULL, false),1), (clean))
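/*
 * The sizeof() in set_cleanup() never evaluates clean(); it just makes the
 * compiler type-check that clean() really takes (type *, bool) before the
 * pointer is stored as a generic cleanup callback.  As a rough illustration,
 * set_cleanup(p, cleanup_malloc, struct malloc_call) expands to something
 * like:
 *
 *      p->cleanup = (void *)((void)sizeof(cleanup_malloc(
 *                              (struct malloc_call *)NULL, false), 1),
 *                            (cleanup_malloc));
 */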
193
194 /* Dup the fd to a high value (out of the way I hope!), and close the old fd. */
195 static int move_fd_to_high(int fd)
196 {
197         int i;
198
199         for (i = FD_SETSIZE - 1; i >= 0; i--) {
200                 if (fcntl(i, F_GETFL) == -1 && errno == EBADF) {
201                         if (dup2(fd, i) == -1)
202                                 err(1, "Failed to dup fd %i to %i", fd, i);
203                         close(fd);
204                         return i;
205                 }
206         }
207         /* Nothing?  Really?  Er... ok? */
208         return fd;
209 }
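/*
 * The high fds matter because the failing child keeps extra descriptors
 * open (the control and output pipes, and files reopened by
 * restore_contents()); parking them just below FD_SETSIZE keeps them out
 * of the range of fd numbers the code under test expects to get back from
 * open() and pipe().
 */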
210
211 static bool read_write_info(int fd)
212 {
213         struct write_call *w;
214         char *buf;
215
216         /* We don't need all of this, but it's simple. */
217         child_writes = realloc(child_writes,
218                                (child_writes_num+1) * sizeof(child_writes[0]));
219         w = &child_writes[child_writes_num];
220         if (!read_all(fd, w, sizeof(*w)))
221                 return false;
222
223         w->buf = buf = malloc(w->count);
224         if (!read_all(fd, buf, w->count))
225                 return false;
226
227         child_writes_num++;
228         return true;
229 }
230
231 static char *failpath_string(void)
232 {
233         struct failtest_call *i;
234         char *ret = strdup("");
235         unsigned len = 0;
236
237         /* Inefficient, but who cares? */
238         tlist_for_each(&history, i, list) {
239                 ret = realloc(ret, len + 2);
240                 ret[len] = info_to_arg[i->type];
241                 if (i->fail)
242                         ret[len] = toupper(ret[len]);
243                 ret[++len] = '\0';
244         }
245         return ret;
246 }
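/*
 * This string is what --failpath consumes in follow_path() below: one
 * letter per recorded call (taken from info_to_arg[]), lower case for a
 * call that succeeded, UPPER case for one that was failed.  So, assuming
 * 'm' maps to malloc and 'o' to open, a reproduction line such as
 *
 *      To reproduce: --failpath=mmO
 *
 * means "let the first two mallocs succeed, then fail the open".
 */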
247
248 static void warn_via_fd(int e, const char *fmt, va_list ap)
249 {
250         char *p = failpath_string();
251
252         vdprintf(warnfd, fmt, ap);
253         if (e != -1)
254                 dprintf(warnfd, ": %s", strerror(e));
255         dprintf(warnfd, " [%s]\n", p);
256         free(p);
257 }
258
259 static void fwarn(const char *fmt, ...)
260 {
261         va_list ap;
262         int e = errno;
263
264         va_start(ap, fmt);
265         warn_via_fd(e, fmt, ap);
266         va_end(ap);
267 }
268
269
270 static void fwarnx(const char *fmt, ...)
271 {
272         va_list ap;
273
274         va_start(ap, fmt);
275         warn_via_fd(-1, fmt, ap);
276         va_end(ap);
277 }
278
279 static void tell_parent(enum info_type type)
280 {
281         if (control_fd != -1)
282                 write_all(control_fd, &type, sizeof(type));
283 }
284
285 static void child_fail(const char *out, size_t outlen, const char *fmt, ...)
286 {
287         va_list ap;
288         char *path = failpath_string();
289
290         va_start(ap, fmt);
291         vfprintf(stderr, fmt, ap);
292         va_end(ap);
293
294         fprintf(stderr, "%.*s", (int)outlen, out);
295         printf("To reproduce: --failpath=%s\n", path);
296         free(path);
297         tell_parent(FAILURE);
298         exit(1);
299 }
300
301 static void PRINTF_FMT(1, 2) trace(const char *fmt, ...)
302 {
303         va_list ap;
304         unsigned int i;
305
306         if (tracefd == -1)
307                 return;
308
309         for (i = 0; i < traceindent; i++)
310                 dprintf(tracefd, "  ");
311
312         dprintf(tracefd, "%u: ", getpid());
313         va_start(ap, fmt);
314         vdprintf(tracefd, fmt, ap);
315         va_end(ap);
316 }
317
318 static pid_t child;
319
320 static void hand_down(int signum)
321 {
322         kill(child, signum);
323 }
324
325 static void release_locks(void)
326 {
327         /* Locks were never acquired/reacquired? */
328         if (lock_owner == 0)
329                 return;
330
331         /* We own them?  Release them all. */
332         if (lock_owner == getpid()) {
333                 unsigned int i;
334                 struct flock fl;
335                 fl.l_type = F_UNLCK;
336                 fl.l_whence = SEEK_SET;
337                 fl.l_start = 0;
338                 fl.l_len = 0;
339
340                 trace("Releasing %u locks\n", lock_num);
341                 for (i = 0; i < lock_num; i++)
342                         fcntl(locks[i].fd, F_SETLK, &fl);
343         } else {
344                 /* Our parent must have them; pass request up. */
345                 enum info_type type = RELEASE_LOCKS;
346                 assert(control_fd != -1);
347                 write_all(control_fd, &type, sizeof(type));
348         }
349         lock_owner = 0;
350 }
351
352 /* off_t is a signed type.  Getting its max is non-trivial. */
353 static off_t off_max(void)
354 {
355         BUILD_ASSERT(sizeof(off_t) == 4 || sizeof(off_t) == 8);
356         if (sizeof(off_t) == 4)
357                 return (off_t)0x7FFFFFFF;
358         else
359                 return (off_t)0x7FFFFFFFFFFFFFFFULL;
360 }
361
362 static void get_locks(void)
363 {
364         unsigned int i;
365         struct flock fl;
366
367         if (lock_owner == getpid())
368                 return;
369
370         if (lock_owner != 0) {
371                 enum info_type type = RELEASE_LOCKS;
372                 assert(control_fd != -1);
373                 trace("Asking parent to release locks\n");
374                 write_all(control_fd, &type, sizeof(type));
375         }
376
377         fl.l_whence = SEEK_SET;
378
379         for (i = 0; i < lock_num; i++) {
380                 fl.l_type = locks[i].type;
381                 fl.l_start = locks[i].start;
382                 if (locks[i].end == off_max())
383                         fl.l_len = 0;
384                 else
385                         fl.l_len = locks[i].end - locks[i].start + 1;
386
387                 if (fcntl(locks[i].fd, F_SETLKW, &fl) != 0)
388                         abort();
389         }
390         trace("Acquired %u locks\n", lock_num);
391         lock_owner = getpid();
392 }
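/*
 * Why the lock_owner dance: POSIX fcntl() record locks belong to a single
 * process, so after fork() only one of the related processes can really
 * hold them at a time.  get_locks() asks its parent (via a RELEASE_LOCKS
 * message on control_fd) to give them up -- release_locks() passes the
 * request further up if that parent is not the owner -- and then
 * re-acquires everything recorded in locks[] for this process.
 */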
393
394
395 static struct contents_saved *save_contents(const char *filename,
396                                             int fd, size_t count, off_t off,
397                                             const char *why)
398 {
399         struct contents_saved *s = malloc(sizeof(*s) + count);
400         ssize_t ret;
401
402         s->off = off;
403
404         ret = pread(fd, s->contents, count, off);
405         if (ret < 0) {
406                 fwarn("failtest_write: failed to save old contents!");
407                 s->count = 0;
408         } else
409                 s->count = ret;
410
411         /* Use lseek to get the size of the file, but we have to restore
412          * the file offset afterwards. */
413         off = lseek(fd, 0, SEEK_CUR);
414         s->old_len = lseek(fd, 0, SEEK_END);
415         lseek(fd, off, SEEK_SET);
416
417         trace("Saving %p %s %zu@%llu after %s (filelength %llu) via fd %i\n",
418               s, filename, s->count, (long long)s->off, why,
419               (long long)s->old_len, fd);
420         return s;
421 }
422
423 static void restore_contents(struct failtest_call *opener,
424                              struct contents_saved *s,
425                              bool restore_offset,
426                              const char *caller)
427 {
428         int fd;
429
430         /* The top parent doesn't need to restore. */
431         if (control_fd == -1)
432                 return;
433
434         /* Has the fd been closed? */
435         if (opener->u.open.closed) {
436                 /* Reopen, replace fd, close silently as we clean up. */
437                 fd = open(opener->u.open.pathname, O_RDWR);
438                 if (fd < 0) {
439                         fwarn("failtest: could not reopen %s to clean up %s!",
440                               opener->u.open.pathname, caller);
441                         return;
442                 }
443                 /* Make it clearly distinguishable from a "normal" fd. */
444                 fd = move_fd_to_high(fd);
445                 trace("Reopening %s to restore it (was fd %i, now %i)\n",
446                       opener->u.open.pathname, opener->u.open.ret, fd);
447                 opener->u.open.ret = fd;
448                 opener->u.open.closed = false;
449         }
450         fd = opener->u.open.ret;
451
452         trace("Restoring %p %s %zu@%llu after %s (filelength %llu) via fd %i\n",
453               s, opener->u.open.pathname, s->count, (long long)s->off, caller,
454               (long long)s->old_len, fd);
455         if (pwrite(fd, s->contents, s->count, s->off) != s->count) {
456                 fwarn("failtest: write failed cleaning up %s for %s!",
457                       opener->u.open.pathname, caller);
458         }
459
460         if (ftruncate(fd, s->old_len) != 0) {
461                 fwarn("failtest_write: truncate failed cleaning up %s for %s!",
462                       opener->u.open.pathname, caller);
463         }
464
465         if (restore_offset) {
466                 trace("Restoring offset of fd %i to %llu\n",
467                       fd, (long long)s->off);
468                 lseek(fd, s->off, SEEK_SET);
469         }
470 }
471
472 /* We save/restore most things on demand, but mmapped files are always saved up front. */
473 static void save_mmapped_files(void)
474 {
475         struct failtest_call *i;
476         trace("Saving mmapped files in child\n");
477
478         tlist_for_each_rev(&history, i, list) {
479                 struct mmap_call *m = &i->u.mmap;
480                 struct saved_mmapped_file *s;
481
482                 if (i->type != FAILTEST_MMAP)
483                         continue;
484
485                 /* FIXME: We only handle mmapped files where fd is still open. */
486                 if (m->opener->u.open.closed)
487                         continue;
488
489                 s = malloc(sizeof *s);
490                 s->s = save_contents(m->opener->u.open.pathname,
491                                      m->fd, m->length, m->offset,
492                                      "mmapped file before fork");
493                 s->opener = m->opener;
494                 s->next = saved_mmapped_files;
495                 saved_mmapped_files = s;
496         }
497 }
498
499 static void free_mmapped_files(bool restore)
500 {
501         trace("%s mmapped files in child\n",
502               restore ? "Restoring" : "Discarding");
503         while (saved_mmapped_files) {
504                 struct saved_mmapped_file *next = saved_mmapped_files->next;
505                 if (restore)
506                         restore_contents(saved_mmapped_files->opener,
507                                          saved_mmapped_files->s, false,
508                                          "saved mmap");
509                 free(saved_mmapped_files->s);
510                 free(saved_mmapped_files);
511                 saved_mmapped_files = next;
512         }
513 }
514
515 /* Returns a FAILTEST_OPEN, FAILTEST_PIPE or NULL. */
516 static struct failtest_call *opener_of(int fd)
517 {
518         struct failtest_call *i;
519
520         /* Don't get confused and match genuinely failed opens. */
521         if (fd < 0)
522                 return NULL;
523
524         /* Figure out the set of live fds. */
525         tlist_for_each_rev(&history, i, list) {
526                 if (i->fail)
527                         continue;
528                 switch (i->type) {
529                 case FAILTEST_CLOSE:
530                         if (i->u.close.fd == fd) {
531                                 return NULL;
532                         }
533                         break;
534                 case FAILTEST_OPEN:
535                         if (i->u.open.ret == fd) {
536                                 if (i->u.open.closed)
537                                         return NULL;
538                                 return i;
539                         }
540                         break;
541                 case FAILTEST_PIPE:
542                         if (i->u.pipe.fds[0] == fd || i->u.pipe.fds[1] == fd) {
543                                 return i;
544                         }
545                         break;
546                 default:
547                         break;
548                 }
549         }
550
551         /* FIXME: socket, dup, etc are untracked! */
552         return NULL;
553 }
554
555 static void free_call(struct failtest_call *call)
556 {
557         /* We don't do this in cleanup: needed even for failed opens. */
558         if (call->type == FAILTEST_OPEN)
559                 free((char *)call->u.open.pathname);
560         free(call->backtrace);
561         tlist_del_from(&history, call, list);
562         free(call);
563 }
564
565 /* Free up memory, so valgrind doesn't report leaks. */
566 static void free_everything(void)
567 {
568         struct failtest_call *i;
569
570         while ((i = tlist_top(&history, struct failtest_call, list)) != NULL)
571                 free_call(i);
572
573         failtable_clear(&failtable);
574 }
575
576 static NORETURN void failtest_cleanup(bool forced_cleanup, int status)
577 {
578         struct failtest_call *i;
579         bool restore = true;
580
581         /* For children, we don't care if they "failed" the testing. */
582         if (control_fd != -1)
583                 status = 0;
584         else
585                 /* We don't restore contents for original parent. */
586                 restore = false;
587
588         /* Cleanup everything, in reverse order. */
589         tlist_for_each_rev(&history, i, list) {
590                 /* Don't restore things our parent did. */
591                 if (i == our_history_start)
592                         restore = false;
593
594                 if (i->fail)
595                         continue;
596
597                 if (i->cleanup)
598                         i->cleanup(&i->u, restore);
599
600                 /* But their program shouldn't leak, even on failure. */
601                 if (!forced_cleanup && i->can_leak) {
602                         printf("Leak at %s:%u: --failpath=%s\n",
603                                i->file, i->line, failpath_string());
604                         status = 1;
605                 }
606         }
607
608         /* Put back mmapped files the way our parent (if any) expects. */
609         free_mmapped_files(true);
610
611         free_everything();
612         if (status == 0)
613                 tell_parent(SUCCESS);
614         else
615                 tell_parent(FAILURE);
616         exit(status);
617 }
618
619 static bool following_path(void)
620 {
621         if (!failpath)
622                 return false;
623         /* + means continue after end, like normal. */
624         if (*failpath == '+') {
625                 failpath = NULL;
626                 return false;
627         }
628         return true;
629 }
630
631 static bool follow_path(struct failtest_call *call)
632 {
633         if (*failpath == '\0') {
634                 /* Continue, but don't inject errors. */
635                 return call->fail = false;
636         }
637
638         if (tolower((unsigned char)*failpath) != info_to_arg[call->type])
639                 errx(1, "Failpath expected '%s' got '%c'\n",
640                      failpath, info_to_arg[call->type]);
641         call->fail = cisupper(*(failpath++));
642         if (call->fail)
643                 call->can_leak = false;
644         return call->fail;
645 }
646
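/*
 * should_fail() is the heart of the scheme.  For a call that has not
 * already been failed at this location, it forks: the child returns true
 * and carries on with the call failing (its side effects are undone by the
 * cleanup callbacks when it exits), while the parent waits, collecting the
 * child's output on output[] and WRITE/RELEASE_LOCKS/SUCCESS/FAILURE
 * messages on control[], and then returns false so the call succeeds in
 * the surviving process.
 */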
647 static bool should_fail(struct failtest_call *call)
648 {
649         int status;
650         int control[2], output[2];
651         enum info_type type = UNEXPECTED;
652         char *out = NULL;
653         size_t outlen = 0;
654         struct failtest_call *dup;
655
656         if (call == &unrecorded_call)
657                 return false;
658
659         if (following_path())
660                 return follow_path(call);
661
662         /* Attach debugger if they asked for it. */
663         if (debugpath) {
664                 char *path;
665
666                 /* Pretend this last call matches whatever path wanted:
667                  * keeps valgrind happy. */
668                 call->fail = cisupper(debugpath[strlen(debugpath)-1]);
669                 path = failpath_string();
670
671                 if (streq(path, debugpath)) {
672                         char str[80];
673
674                         /* Don't timeout. */
675                         signal(SIGUSR1, SIG_IGN);
676                         sprintf(str, "xterm -e gdb /proc/%d/exe %d &",
677                                 getpid(), getpid());
678                         if (system(str) == 0)
679                                 sleep(5);
680                 } else {
681                         /* Ignore last character: could be upper or lower. */
682                         path[strlen(path)-1] = '\0';
683                         if (!strstarts(debugpath, path)) {
684                                 fprintf(stderr,
685                                         "--debugpath not followed: %s\n", path);
686                                 debugpath = NULL;
687                         }
688                 }
689                 free(path);
690         }
691
692         /* Are we probing?  If so, we never fail twice. */
693         if (probing) {
694                 trace("Not failing %c due to FAIL_PROBE return\n",
695                       info_to_arg[call->type]);
696                 return call->fail = false;
697         }
698
699         /* Don't fail more than once in the same place. */
700         dup = failtable_get(&failtable, call);
701         if (dup) {
702                 trace("Not failing %c due to duplicate\n",
703                       info_to_arg[call->type]);
704                 return call->fail = false;
705         }
706
707         if (failtest_hook) {
708                 switch (failtest_hook(&history)) {
709                 case FAIL_OK:
710                         break;
711                 case FAIL_PROBE:
712                         probing = true;
713                         break;
714                 case FAIL_DONT_FAIL:
715                         trace("Not failing %c due to failhook return\n",
716                               info_to_arg[call->type]);
717                         call->fail = false;
718                         return false;
719                 default:
720                         abort();
721                 }
722         }
723
724         /* Add it to our table of calls. */
725         failtable_add(&failtable, call);
726
727         /* We're going to fail in the child. */
728         call->fail = true;
729         if (pipe(control) != 0 || pipe(output) != 0)
730                 err(1, "opening pipe");
731
732         /* Move out of the way, to high fds. */
733         control[0] = move_fd_to_high(control[0]);
734         control[1] = move_fd_to_high(control[1]);
735         output[0] = move_fd_to_high(output[0]);
736         output[1] = move_fd_to_high(output[1]);
737
738         /* Prevent double-printing (in child and parent) */
739         fflush(stdout);
740         child = fork();
741         if (child == -1)
742                 err(1, "forking failed");
743
744         if (child == 0) {
745                 traceindent++;
746                 if (tracefd != -1) {
747                         struct timeval diff;
748                         const char *p;
749                         char *failpath;
750                         struct failtest_call *c;
751
752                         c = tlist_tail(&history, struct failtest_call, list);
753                         diff = time_sub(time_now(), start);
754                         failpath = failpath_string();
755                         trace("%u->%u (%u.%02u): %s (", getppid(), getpid(),
756                               (int)diff.tv_sec, (int)diff.tv_usec / 10000,
757                               failpath);
758                         free(failpath);
759                         p = strrchr(c->file, '/');
760                         if (p)
761                                 trace("%s", p+1);
762                         else
763                                 trace("%s", c->file);
764                         trace(":%u)\n", c->line);
765                 }
766                 /* From here on, we have to clean up! */
767                 our_history_start = tlist_tail(&history, struct failtest_call,
768                                                list);
769                 close(control[0]);
770                 close(output[0]);
771                 /* Don't swallow stderr if we're tracing. */
772                 if (tracefd == -1) {
773                         dup2(output[1], STDOUT_FILENO);
774                         dup2(output[1], STDERR_FILENO);
775                         if (output[1] != STDOUT_FILENO
776                             && output[1] != STDERR_FILENO)
777                                 close(output[1]);
778                 }
779                 control_fd = move_fd_to_high(control[1]);
780
781                 /* Forget any of our parent's saved files. */
782                 free_mmapped_files(false);
783
784                 /* Now, save any files we need to. */
785                 save_mmapped_files();
786
787                 /* Failed calls can't leak. */
788                 call->can_leak = false;
789
790                 return true;
791         }
792
793         signal(SIGUSR1, hand_down);
794
795         close(control[1]);
796         close(output[1]);
797
798         /* We grab output so we can display it; we grab writes so we
799          * can compare. */
800         do {
801                 struct pollfd pfd[2];
802                 int ret;
803
804                 pfd[0].fd = output[0];
805                 pfd[0].events = POLLIN|POLLHUP;
806                 pfd[1].fd = control[0];
807                 pfd[1].events = POLLIN|POLLHUP;
808
809                 if (type == SUCCESS)
810                         ret = poll(pfd, 1, failtest_timeout_ms);
811                 else
812                         ret = poll(pfd, 2, failtest_timeout_ms);
813
814                 if (ret == 0)
815                         hand_down(SIGUSR1);
816                 if (ret < 0) {
817                         if (errno == EINTR)
818                                 continue;
819                         err(1, "Poll returned %i", ret);
820                 }
821
822                 if (pfd[0].revents & POLLIN) {
823                         ssize_t len;
824
825                         out = realloc(out, outlen + 8192);
826                         len = read(output[0], out + outlen, 8192);
827                         outlen += len;
828                 } else if (type != SUCCESS && (pfd[1].revents & POLLIN)) {
829                         if (read_all(control[0], &type, sizeof(type))) {
830                                 if (type == WRITE) {
831                                         if (!read_write_info(control[0]))
832                                                 break;
833                                 } else if (type == RELEASE_LOCKS) {
834                                         release_locks();
835                                         /* FIXME: Tell them we're done... */
836                                 }
837                         }
838                 } else if (pfd[0].revents & POLLHUP) {
839                         break;
840                 }
841         } while (type != FAILURE);
842
843         close(output[0]);
844         close(control[0]);
845         waitpid(child, &status, 0);
846         if (!WIFEXITED(status)) {
847                 if (WTERMSIG(status) == SIGUSR1)
848                         child_fail(out, outlen, "Timed out");
849                 else
850                         child_fail(out, outlen, "Killed by signal %u: ",
851                                    WTERMSIG(status));
852         }
853         /* Child printed failure already, just pass up exit code. */
854         if (type == FAILURE) {
855                 fprintf(stderr, "%.*s", (int)outlen, out);
856                 tell_parent(type);
857                 exit(WEXITSTATUS(status) ? WEXITSTATUS(status) : 1);
858         }
859         if (WEXITSTATUS(status) != 0)
860                 child_fail(out, outlen, "Exited with status %i: ",
861                            WEXITSTATUS(status));
862
863         free(out);
864         signal(SIGUSR1, SIG_DFL);
865
866         /* Only child does probe. */
867         probing = false;
868
869         /* We continue onwards without failing. */
870         call->fail = false;
871         return false;
872 }
873
874 static void cleanup_calloc(struct calloc_call *call, bool restore)
875 {
876         trace("undoing calloc %p\n", call->ret);
877         free(call->ret);
878 }
879
880 void *failtest_calloc(size_t nmemb, size_t size,
881                       const char *file, unsigned line)
882 {
883         struct failtest_call *p;
884         struct calloc_call call;
885         call.nmemb = nmemb;
886         call.size = size;
887         p = add_history(FAILTEST_CALLOC, true, file, line, &call);
888
889         if (should_fail(p)) {
890                 p->u.calloc.ret = NULL;
891                 p->error = ENOMEM;
892         } else {
893                 p->u.calloc.ret = calloc(nmemb, size);
894                 set_cleanup(p, cleanup_calloc, struct calloc_call);
895         }
896         trace("calloc %zu x %zu %s:%u -> %p\n",
897               nmemb, size, file, line, p->u.calloc.ret);
898         errno = p->error;
899         return p->u.calloc.ret;
900 }
901
902 static void cleanup_malloc(struct malloc_call *call, bool restore)
903 {
904         trace("undoing malloc %p\n", call->ret);
905         free(call->ret);
906 }
907
908 void *failtest_malloc(size_t size, const char *file, unsigned line)
909 {
910         struct failtest_call *p;
911         struct malloc_call call;
912         call.size = size;
913
914         p = add_history(FAILTEST_MALLOC, true, file, line, &call);
915         if (should_fail(p)) {
916                 p->u.malloc.ret = NULL;
917                 p->error = ENOMEM;
918         } else {
919                 p->u.malloc.ret = malloc(size);
920                 set_cleanup(p, cleanup_malloc, struct malloc_call);
921         }
922         trace("malloc %zu %s:%u -> %p\n",
923               size, file, line, p->u.malloc.ret);
924         errno = p->error;
925         return p->u.malloc.ret;
926 }
927
928 static void cleanup_realloc(struct realloc_call *call, bool restore)
929 {
930         trace("undoing realloc %p\n", call->ret);
931         free(call->ret);
932 }
933
934 /* Walk back and find out if we got this ptr from a previous routine. */
935 static void fixup_ptr_history(void *ptr, const char *why)
936 {
937         struct failtest_call *i;
938
939         /* Start at end of history, work back. */
940         tlist_for_each_rev(&history, i, list) {
941                 switch (i->type) {
942                 case FAILTEST_REALLOC:
943                         if (i->u.realloc.ret == ptr) {
944                                 trace("found realloc %p %s:%u matching %s\n",
945                                       ptr, i->file, i->line, why);
946                                 i->cleanup = NULL;
947                                 i->can_leak = false;
948                                 return;
949                         }
950                         break;
951                 case FAILTEST_MALLOC:
952                         if (i->u.malloc.ret == ptr) {
953                                 trace("found malloc %p %s:%u matching %s\n",
954                                       ptr, i->file, i->line, why);
955                                 i->cleanup = NULL;
956                                 i->can_leak = false;
957                                 return;
958                         }
959                         break;
960                 case FAILTEST_CALLOC:
961                         if (i->u.calloc.ret == ptr) {
962                                 trace("found calloc %p %s:%u matching %s\n",
963                                       ptr, i->file, i->line, why);
964                                 i->cleanup = NULL;
965                                 i->can_leak = false;
966                                 return;
967                         }
968                         break;
969                 default:
970                         break;
971                 }
972         }
973         trace("Did not find %p matching %s\n", ptr, why);
974 }
975
976 void *failtest_realloc(void *ptr, size_t size, const char *file, unsigned line)
977 {
978         struct failtest_call *p;
979         struct realloc_call call;
980         call.size = size;
981         p = add_history(FAILTEST_REALLOC, true, file, line, &call);
982
983         /* FIXME: Try one child moving allocation, one not. */
984         if (should_fail(p)) {
985                 p->u.realloc.ret = NULL;
986                 p->error = ENOMEM;
987         } else {
988                 /* Don't catch this one in the history fixup... */
989                 p->u.realloc.ret = NULL;
990                 fixup_ptr_history(ptr, "realloc");
991                 p->u.realloc.ret = realloc(ptr, size);
992                 set_cleanup(p, cleanup_realloc, struct realloc_call);
993         }
994         trace("realloc %p %s:%u -> %p\n",
995               ptr, file, line, p->u.realloc.ret);
996         errno = p->error;
997         return p->u.realloc.ret;
998 }
999
1000 /* FIXME: Record free, so we can terminate fixup_ptr_history correctly.
1001  * If there's an alloc we don't see, it could get confusing if it matches
1002  * a previous allocation we did see. */
1003 void failtest_free(void *ptr)
1004 {
1005         fixup_ptr_history(ptr, "free");
1006         trace("free %p\n", ptr);
1007         free(ptr);
1008 }
1009
1010
1011 static struct contents_saved *save_file(const char *pathname)
1012 {
1013         int fd;
1014         struct contents_saved *s;
1015
1016         fd = open(pathname, O_RDONLY);
1017         if (fd < 0)
1018                 return NULL;
1019
1020         s = save_contents(pathname, fd, lseek(fd, 0, SEEK_END), 0,
1021                           "open with O_TRUNC");
1022         close(fd);
1023         return s;
1024 }
1025
1026 /* Optimization: don't create a child for an open which *we know*
1027  * would fail anyway. */
1028 static bool open_would_fail(const char *pathname, int flags)
1029 {
1030         if ((flags & O_ACCMODE) == O_RDONLY)
1031                 return access(pathname, R_OK) != 0;
1032         if (!(flags & O_CREAT)) {
1033                 if ((flags & O_ACCMODE) == O_WRONLY)
1034                         return access(pathname, W_OK) != 0;
1035                 if ((flags & O_ACCMODE) == O_RDWR)
1036                         return access(pathname, W_OK) != 0
1037                                 || access(pathname, R_OK) != 0;
1038         }
1039         /* FIXME: We could check if it exists, for O_CREAT|O_EXCL */
1040         return false;
1041 }
1042
1043 static void cleanup_open(struct open_call *call, bool restore)
1044 {
1045         if (restore && call->saved)
1046                 restore_contents(container_of(call, struct failtest_call,
1047                                               u.open),
1048                                  call->saved, false, "open with O_TRUNC");
1049         if (!call->closed) {
1050                 trace("Cleaning up open %s by closing fd %i\n",
1051                       call->pathname, call->ret);
1052                 close(call->ret);
1053                 call->closed = true;
1054         }
1055         free(call->saved);
1056 }
1057
1058 int failtest_open(const char *pathname,
1059                   const char *file, unsigned line, ...)
1060 {
1061         struct failtest_call *p;
1062         struct open_call call;
1063         va_list ap;
1064
1065         call.pathname = strdup(pathname);
1066         va_start(ap, line);
1067         call.flags = va_arg(ap, int);
1068         call.always_save = false;
1069         call.closed = false;
1070         call.mode = 0;
1071         if (call.flags & O_CREAT)
1072                 call.mode = va_arg(ap, int);
1073         va_end(ap);
1074         p = add_history(FAILTEST_OPEN, true, file, line, &call);
1075         /* Avoid memory leak! */
1076         if (p == &unrecorded_call)
1077                 free((char *)call.pathname);
1078
1079         if (should_fail(p)) {
1080                 /* Don't bother inserting failures that would happen anyway. */
1081                 if (open_would_fail(pathname, call.flags)) {
1082                         trace("Open would have failed anyway: stopping\n");
1083                         failtest_cleanup(true, 0);
1084                 }
1085                 p->u.open.ret = -1;
1086                 /* FIXME: Play with error codes? */
1087                 p->error = EACCES;
1088         } else {
1089                 /* Save the old version if they're truncating it. */
1090                 if (call.flags & O_TRUNC)
1091                         p->u.open.saved = save_file(pathname);
1092                 else
1093                         p->u.open.saved = NULL;
1094                 p->u.open.ret = open(pathname, call.flags, call.mode);
1095                 if (p->u.open.ret == -1) {
1096                         p->u.open.closed = true;
1097                         p->can_leak = false;
1098                 } else {
1099                         set_cleanup(p, cleanup_open, struct open_call);
1100                 }
1101         }
1102         trace("open %s %s:%u -> %i (opener %p)\n",
1103               pathname, file, line, p->u.open.ret, &p->u.open);
1104         errno = p->error;
1105         return p->u.open.ret;
1106 }
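/*
 * Nothing calls failtest_open() directly; failtest_override.h remaps
 * open() to it, which is why the fixed (file, line) arguments come before
 * the variadic flags/mode.  A sketch of what that mapping looks like (the
 * real macro lives in failtest_override.h):
 *
 *      #define open(pathname, ...) \
 *              failtest_open((pathname), __FILE__, __LINE__, __VA_ARGS__)
 */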
1107
1108 static void cleanup_mmap(struct mmap_call *mmap, bool restore)
1109 {
1110         trace("cleaning up mmap @%p (opener %p)\n",
1111               mmap->ret, mmap->opener);
1112         if (restore)
1113                 restore_contents(mmap->opener, mmap->saved, false, "mmap");
1114         free(mmap->saved);
1115 }
1116
1117 void *failtest_mmap(void *addr, size_t length, int prot, int flags,
1118                     int fd, off_t offset, const char *file, unsigned line)
1119 {
1120         struct failtest_call *p;
1121         struct mmap_call call;
1122
1123         call.addr = addr;
1124         call.length = length;
1125         call.prot = prot;
1126         call.flags = flags;
1127         call.offset = offset;
1128         call.fd = fd;
1129         call.opener = opener_of(fd);
1130
1131         /* If we don't know what file it was, don't fail. */
1132         if (!call.opener) {
1133                 if (fd != -1) {
1134                         fwarnx("failtest_mmap: couldn't figure out source for"
1135                                " fd %i at %s:%u", fd, file, line);
1136                 }
1137                 addr = mmap(addr, length, prot, flags, fd, offset);
1138                 trace("mmap of fd %i -> %p (opener = NULL)\n", fd, addr);
1139                 return addr;
1140         }
1141
1142         p = add_history(FAILTEST_MMAP, false, file, line, &call);
1143         if (should_fail(p)) {
1144                 p->u.mmap.ret = MAP_FAILED;
1145                 p->error = ENOMEM;
1146         } else {
1147                 p->u.mmap.ret = mmap(addr, length, prot, flags, fd, offset);
1148                 /* Save contents if we're writing to a normal file */
1149                 if (p->u.mmap.ret != MAP_FAILED
1150                     && (prot & PROT_WRITE)
1151                     && call.opener->type == FAILTEST_OPEN) {
1152                         const char *fname = call.opener->u.open.pathname;
1153                         p->u.mmap.saved = save_contents(fname, fd, length,
1154                                                         offset, "being mmapped");
1155                         set_cleanup(p, cleanup_mmap, struct mmap_call);
1156                 }
1157         }
1158         trace("mmap of fd %i %s:%u -> %p (opener = %p)\n",
1159               fd, file, line, p->u.mmap.ret, call.opener);
1160         errno = p->error;
1161         return p->u.mmap.ret;
1162 }
1163
1164 static void cleanup_pipe(struct pipe_call *call, bool restore)
1165 {
1166         trace("cleaning up pipe fd=%i%s,%i%s\n",
1167               call->fds[0], call->closed[0] ? "(already closed)" : "",
1168               call->fds[1], call->closed[1] ? "(already closed)" : "");
1169         if (!call->closed[0])
1170                 close(call->fds[0]);
1171         if (!call->closed[1])
1172                 close(call->fds[1]);
1173 }
1174
1175 int failtest_pipe(int pipefd[2], const char *file, unsigned line)
1176 {
1177         struct failtest_call *p;
1178         struct pipe_call call;
1179
1180         p = add_history(FAILTEST_PIPE, true, file, line, &call);
1181         if (should_fail(p)) {
1182                 p->u.pipe.ret = -1;
1183                 /* FIXME: Play with error codes? */
1184                 p->error = EMFILE;
1185         } else {
1186                 p->u.pipe.ret = pipe(p->u.pipe.fds);
1187                 p->u.pipe.closed[0] = p->u.pipe.closed[1] = false;
1188                 set_cleanup(p, cleanup_pipe, struct pipe_call);
1189         }
1190
1191         trace("pipe %s:%u -> %i,%i\n", file, line,
1192               p->u.pipe.ret ? -1 : p->u.pipe.fds[0],
1193               p->u.pipe.ret ? -1 : p->u.pipe.fds[1]);
1194
1195         /* This causes valgrind to notice if they use pipefd[] after failure */
1196         memcpy(pipefd, p->u.pipe.fds, sizeof(p->u.pipe.fds));
1197         errno = p->error;
1198         return p->u.pipe.ret;
1199 }
1200
1201 static void cleanup_read(struct read_call *call, bool restore)
1202 {
1203         if (restore) {
1204                 trace("cleaning up read on fd %i: seeking to %llu\n",
1205                       call->fd, (long long)call->off);
1206
1207                 /* Read (not pread!) moves file offset! */
1208                 if (lseek(call->fd, call->off, SEEK_SET) != call->off) {
1209                         fwarn("Restoring lseek pointer failed (read)");
1210                 }
1211         }
1212 }
1213
1214 static ssize_t failtest_add_read(int fd, void *buf, size_t count, off_t off,
1215                                  bool is_pread, const char *file, unsigned line)
1216 {
1217         struct failtest_call *p;
1218         struct read_call call;
1219         call.fd = fd;
1220         call.buf = buf;
1221         call.count = count;
1222         call.off = off;
1223         p = add_history(FAILTEST_READ, false, file, line, &call);
1224
1225         /* FIXME: Try partial read returns. */
1226         if (should_fail(p)) {
1227                 p->u.read.ret = -1;
1228                 p->error = EIO;
1229         } else {
1230                 if (is_pread)
1231                         p->u.read.ret = pread(fd, buf, count, off);
1232                 else {
1233                         p->u.read.ret = read(fd, buf, count);
1234                         if (p->u.read.ret != -1)
1235                                 set_cleanup(p, cleanup_read, struct read_call);
1236                 }
1237         }
1238         trace("%sread %s:%u fd %i %zu@%llu -> %i\n",
1239               is_pread ? "p" : "", file, line, fd, count, (long long)off,
1240               p->u.read.ret);
1241         errno = p->error;
1242         return p->u.read.ret;
1243 }
1244
1245 static void cleanup_write(struct write_call *write, bool restore)
1246 {
1247         trace("cleaning up write on %s\n", write->opener->u.open.pathname);
1248         if (restore)
1249                 restore_contents(write->opener, write->saved, !write->is_pwrite,
1250                                  "write");
1251         free(write->saved);
1252 }
1253
1254 static ssize_t failtest_add_write(int fd, const void *buf,
1255                                   size_t count, off_t off,
1256                                   bool is_pwrite,
1257                                   const char *file, unsigned line)
1258 {
1259         struct failtest_call *p;
1260         struct write_call call;
1261
1262         call.fd = fd;
1263         call.buf = buf;
1264         call.count = count;
1265         call.off = off;
1266         call.is_pwrite = is_pwrite;
1267         call.opener = opener_of(fd);
1268         p = add_history(FAILTEST_WRITE, false, file, line, &call);
1269
1270         /* If we're a child, we need to make sure we write the same thing
1271          * to non-files as the parent does, so tell it. */
1272         if (control_fd != -1 && off == (off_t)-1) {
1273                 enum info_type type = WRITE;
1274
1275                 write_all(control_fd, &type, sizeof(type));
1276                 write_all(control_fd, &p->u.write, sizeof(p->u.write));
1277                 write_all(control_fd, buf, count);
1278         }
1279
1280         /* FIXME: Try partial write returns. */
1281         if (should_fail(p)) {
1282                 p->u.write.ret = -1;
1283                 p->error = EIO;
1284         } else {
1285                 bool is_file;
1286                 assert(call.opener == p->u.write.opener);
1287
1288                 if (p->u.write.opener) {
1289                         is_file = (p->u.write.opener->type == FAILTEST_OPEN);
1290                 } else {
1291                         /* We can't unwind it, so at least check it's the same
1292                          * in parent and child. */
1293                         is_file = false;
1294                 }
1295
1296                 /* FIXME: We assume same write order in parent and child */
1297                 if (!is_file && child_writes_num != 0) {
1298                         if (child_writes[0].fd != fd)
1299                                 errx(1, "Child wrote to fd %u, not %u?",
1300                                      child_writes[0].fd, fd);
1301                         if (child_writes[0].off != p->u.write.off)
1302                                 errx(1, "Child wrote to offset %zu, not %zu?",
1303                                      (size_t)child_writes[0].off,
1304                                      (size_t)p->u.write.off);
1305                         if (child_writes[0].count != count)
1306                                 errx(1, "Child wrote length %zu, not %zu?",
1307                                      child_writes[0].count, count);
1308                         if (memcmp(child_writes[0].buf, buf, count)) {
1309                                 child_fail(NULL, 0,
1310                                            "Child wrote differently to"
1311                                            " fd %u than we did!\n", fd);
1312                         }
1313                         free((char *)child_writes[0].buf);
1314                         child_writes_num--;
1315                         memmove(&child_writes[0], &child_writes[1],
1316                                 sizeof(child_writes[0]) * child_writes_num);
1317
1318                         /* Child wrote it already. */
1319                         trace("write %s:%i on fd %i already done by child\n",
1320                               file, line, fd);
1321                         p->u.write.ret = count;
1322                         errno = p->error;
1323                         return p->u.write.ret;
1324                 }
1325
1326                 if (is_file) {
1327                         p->u.write.saved = save_contents(call.opener->u.open.pathname,
1328                                                          fd, count, off,
1329                                                          "being overwritten");
1330                         set_cleanup(p, cleanup_write, struct write_call);
1331                 }
1332
1333                 /* Though off is current seek ptr for write case, we need to
1334                  * move it.  write() does that for us. */
1335                 if (p->u.write.is_pwrite)
1336                         p->u.write.ret = pwrite(fd, buf, count, off);
1337                 else
1338                         p->u.write.ret = write(fd, buf, count);
1339         }
1340         trace("%swrite %s:%i %zu@%llu on fd %i -> %i\n",
1341               p->u.write.is_pwrite ? "p" : "",
1342               file, line, count, (long long)off, fd, p->u.write.ret);
1343         errno = p->error;
1344         return p->u.write.ret;
1345 }
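/*
 * The child_writes[] checking above exists because writes to things that
 * are not plain files (pipes, sockets) cannot be rolled back: the failing
 * child does them first and reports each one over control_fd (collected by
 * read_write_info() in the parent), and when the surviving parent reaches
 * the same write it verifies that fd, offset, length and contents match
 * and skips re-doing it.
 */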
1346
1347 ssize_t failtest_pwrite(int fd, const void *buf, size_t count, off_t offset,
1348                         const char *file, unsigned line)
1349 {
1350         return failtest_add_write(fd, buf, count, offset, true, file, line);
1351 }
1352
1353 ssize_t failtest_write(int fd, const void *buf, size_t count,
1354                        const char *file, unsigned line)
1355 {
1356         return failtest_add_write(fd, buf, count, lseek(fd, 0, SEEK_CUR), false,
1357                                   file, line);
1358 }
1359
1360 ssize_t failtest_pread(int fd, void *buf, size_t count, off_t off,
1361                        const char *file, unsigned line)
1362 {
1363         return failtest_add_read(fd, buf, count, off, true, file, line);
1364 }
1365
1366 ssize_t failtest_read(int fd, void *buf, size_t count,
1367                       const char *file, unsigned line)
1368 {
1369         return failtest_add_read(fd, buf, count, lseek(fd, 0, SEEK_CUR), false,
1370                                  file, line);
1371 }
1372
1373 static struct lock_info *WARN_UNUSED_RESULT
1374 add_lock(struct lock_info *locks, int fd, off_t start, off_t end, int type)
1375 {
1376         unsigned int i;
1377         struct lock_info *l;
1378
1379         for (i = 0; i < lock_num; i++) {
1380                 l = &locks[i];
1381
1382                 if (l->fd != fd)
1383                         continue;
1384                 /* Four cases we care about:
1385                  * Start overlap:
1386                  *      l =    |      |
1387                  *      new = |   |
1388                  * Mid overlap:
1389                  *      l =    |      |
1390                  *      new =    |  |
1391                  * End overlap:
1392                  *      l =    |      |
1393                  *      new =      |    |
1394                  * Total overlap:
1395                  *      l =    |      |
1396                  *      new = |         |
1397                  */
1398                 if (start > l->start && end < l->end) {
1399                         /* Mid overlap: trim entry, add new one. */
1400                         off_t new_start, new_end;
1401                         new_start = end + 1;
1402                         new_end = l->end;
1403                         trace("splitting lock on fd %i from %llu-%llu"
1404                               " to %llu-%llu\n",
1405                               fd, (long long)l->start, (long long)l->end,
1406                               (long long)l->start, (long long)start - 1);
1407                         l->end = start - 1;
1408                         locks = add_lock(locks,
1409                                          fd, new_start, new_end, l->type);
1410                         l = &locks[i];
1411                 } else if (start <= l->start && end >= l->end) {
1412                         /* Total overlap: eliminate entry. */
1413                         trace("erasing lock on fd %i %llu-%llu\n",
1414                               fd, (long long)l->start, (long long)l->end);
1415                         l->end = 0;
1416                         l->start = 1;
1417                 } else if (end >= l->start && end < l->end) {
1418                         trace("trimming lock on fd %i from %llu-%llu"
1419                               " to %llu-%llu\n",
1420                               fd, (long long)l->start, (long long)l->end,
1421                               (long long)end + 1, (long long)l->end);
1422                         /* Start overlap: trim entry. */
1423                         l->start = end + 1;
1424                 } else if (start > l->start && start <= l->end) {
1425                         trace("trimming lock on fd %i from %llu-%llu"
1426                               " to %llu-%llu\n",
1427                               fd, (long long)l->start, (long long)l->end,
1428                               (long long)l->start, (long long)start - 1);
1429                         /* End overlap: trim entry. */
1430                         l->end = start-1;
1431                 }
1432                 /* Nothing left?  Remove it. */
1433                 if (l->end < l->start) {
1434                         trace("forgetting lock on fd %i\n", fd);
1435                         memmove(l, l + 1, (--lock_num - i) * sizeof(l[0]));
1436                         i--;
1437                 }
1438         }
1439
1440         if (type != F_UNLCK) {
1441                 locks = realloc(locks, (lock_num + 1) * sizeof(*locks));
1442                 l = &locks[lock_num++];
1443                 l->fd = fd;
1444                 l->start = start;
1445                 l->end = end;
1446                 l->type = type;
1447                 trace("new lock on fd %i %llu-%llu\n",
1448                       fd, (long long)l->start, (long long)l->end);
1449         }
1450         return locks;
1451 }
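/*
 * A worked example of the mid-overlap case above: with an existing lock
 * covering bytes 10-20 of some fd, recording an unlock of 12-15 first
 * trims the entry to 10-11, recursively adds 16-20 with the old lock type,
 * and (since the new range is F_UNLCK) adds nothing for 12-15 itself,
 * leaving 10-11 and 16-20 locked.
 */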
1452
1453 /* We trap this so we can record it: we don't fail it. */
1454 int failtest_close(int fd, const char *file, unsigned line)
1455 {
1456         struct close_call call;
1457         struct failtest_call *p, *opener;
1458
1459         /* Do this before we add ourselves to history! */
1460         opener = opener_of(fd);
1461
1462         call.fd = fd;
1463         p = add_history(FAILTEST_CLOSE, false, file, line, &call);
1464         p->fail = false;
1465
1466         /* Consume close from failpath (shouldn't tell us to fail). */
1467         if (following_path()) {
1468                 if (follow_path(p))
1469                         abort();
1470         }
1471
1472         trace("close on fd %i\n", fd);
1473         if (fd < 0)
1474                 return close(fd);
1475
1476         /* Mark opener as not leaking, remove its cleanup function. */
1477         if (opener) {
1478                 trace("close on fd %i found opener %p\n", fd, opener);
1479                 if (opener->type == FAILTEST_PIPE) {
1480                         /* From a pipe? */
1481                         if (opener->u.pipe.fds[0] == fd) {
1482                                 assert(!opener->u.pipe.closed[0]);
1483                                 opener->u.pipe.closed[0] = true;
1484                         } else if (opener->u.pipe.fds[1] == fd) {
1485                                 assert(!opener->u.pipe.closed[1]);
1486                                 opener->u.pipe.closed[1] = true;
1487                         } else
1488                                 abort();
1489                         opener->can_leak = (!opener->u.pipe.closed[0]
1490                                             || !opener->u.pipe.closed[1]);
1491                 } else if (opener->type == FAILTEST_OPEN) {
1492                         opener->u.open.closed = true;
1493                         opener->can_leak = false;
1494                 } else
1495                         abort();
1496         }
1497
1498         /* Restore offset now, in case parent shared (can't do after close!). */
1499         if (control_fd != -1) {
1500                 struct failtest_call *i;
1501
1502                 tlist_for_each_rev(&history, i, list) {
1503                         if (i == our_history_start)
1504                                 break;
1505                         if (i == opener)
1506                                 break;
1507                         if (i->type == FAILTEST_LSEEK && i->u.lseek.fd == fd) {
1508                                 trace("close on fd %i undoes lseek\n", fd);
1509                                 /* This seeks back. */
1510                                 i->cleanup(&i->u, true);
1511                                 i->cleanup = NULL;
1512                         } else if (i->type == FAILTEST_WRITE
1513                                    && i->u.write.fd == fd
1514                                    && !i->u.write.is_pwrite) {
1515                                 trace("close on fd %i undoes write"
1516                                       " offset change\n", fd);
1517                                 /* Write (not pwrite!) moves file offset! */
1518                                 if (lseek(fd, i->u.write.off, SEEK_SET)
1519                                     != i->u.write.off) {
1520                                         fwarn("Restoring lseek pointer failed (write)");
1521                                 }
1522                         } else if (i->type == FAILTEST_READ
1523                                    && i->u.read.fd == fd) {
1524                                 /* preads don't *have* cleanups */
1525                                 if (i->cleanup) {
1526                                         trace("close on fd %i undoes read"
1527                                               " offset change\n", fd);
1528                                         /* This seeks back. */
1529                                         i->cleanup(&i->u, true);
1530                                         i->cleanup = NULL;
1531                                 }
1532                         }
1533                 }
1534         }
1535
1536         /* Close unlocks everything. */
1537         locks = add_lock(locks, fd, 0, off_max(), F_UNLCK);
1538         return close(fd);
1539 }
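
/*
 * Usage sketch (editorial addition): tests do not normally call
 * failtest_close() directly; the wrapper macros in failtest_override.h
 * substitute it for close(), passing the call site.  Roughly (assuming the
 * usual override macros):
 *
 *	#include <ccan/failtest/failtest_override.h>
 *	...
 *	int fd = open("testfile", O_RDWR|O_CREAT, 0600);
 *	close(fd);	becomes failtest_close(fd, __FILE__, __LINE__)
 *
 * Closing an fd also marks the call that opened it as no longer leaking;
 * for a pipe, both ends must be closed before can_leak is cleared.
 */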
1540
1541 /* Zero length means "to end of file" */
1542 static off_t end_of(off_t start, off_t len)
1543 {
1544         if (len == 0)
1545                 return off_max();
1546         return start + len - 1;
1547 }
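
/*
 * Worked example (editorial addition): end_of(10, 5) == 14, i.e. a 5-byte
 * lock starting at offset 10 covers bytes 10..14 inclusive, while
 * end_of(10, 0) == off_max(), matching fcntl()'s "l_len == 0 means to the
 * end of the file".
 */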
1548
1549 /* FIXME: This only handles locks, really. */
1550 int failtest_fcntl(int fd, const char *file, unsigned line, int cmd, ...)
1551 {
1552         struct failtest_call *p;
1553         struct fcntl_call call;
1554         va_list ap;
1555
1556         call.fd = fd;
1557         call.cmd = cmd;
1558
1559         /* Argument extraction. */
1560         switch (cmd) {
1561         case F_SETFL:
1562         case F_SETFD:
1563                 va_start(ap, cmd);
1564                 call.arg.l = va_arg(ap, long);
1565                 va_end(ap);
1566                 trace("fcntl on fd %i F_SETFL/F_SETFD\n", fd);
1567                 return fcntl(fd, cmd, call.arg.l);
1568         case F_GETFD:
1569         case F_GETFL:
1570                 trace("fcntl on fd %i F_GETFL/F_GETFD\n", fd);
1571                 return fcntl(fd, cmd);
1572         case F_GETLK:
1573                 trace("fcntl on fd %i F_GETLK\n", fd);
1574                 get_locks();
1575                 va_start(ap, cmd);
1576                 call.arg.fl = *va_arg(ap, struct flock *);
1577                 va_end(ap);
1578                 return fcntl(fd, cmd, &call.arg.fl);
1579         case F_SETLK:
1580         case F_SETLKW:
1581                 trace("fcntl on fd %i F_SETLK%s\n",
1582                       fd, cmd == F_SETLKW ? "W" : "");
1583                 va_start(ap, cmd);
1584                 call.arg.fl = *va_arg(ap, struct flock *);
1585                 va_end(ap);
1586                 break;
1587         default:
1588                 /* This means you need to implement it here. */
1589                 err(1, "failtest: unknown fcntl %u", cmd);
1590         }
1591
1592         p = add_history(FAILTEST_FCNTL, false, file, line, &call);
1593
1594         if (should_fail(p)) {
1595                 p->u.fcntl.ret = -1;
1596                 if (p->u.fcntl.cmd == F_SETLK)
1597                         p->error = EAGAIN;
1598                 else
1599                         p->error = EDEADLK;
1600         } else {
1601                 get_locks();
1602                 p->u.fcntl.ret = fcntl(p->u.fcntl.fd, p->u.fcntl.cmd,
1603                                        &p->u.fcntl.arg.fl);
1604                 if (p->u.fcntl.ret == -1)
1605                         p->error = errno;
1606                 else {
1607                         /* We don't handle anything else yet. */
1608                         assert(p->u.fcntl.arg.fl.l_whence == SEEK_SET);
1609                         locks = add_lock(locks,
1610                                          p->u.fcntl.fd,
1611                                          p->u.fcntl.arg.fl.l_start,
1612                                          end_of(p->u.fcntl.arg.fl.l_start,
1613                                                 p->u.fcntl.arg.fl.l_len),
1614                                          p->u.fcntl.arg.fl.l_type);
1615                 }
1616         }
1617         trace("fcntl on fd %i -> %i\n", fd, p->u.fcntl.ret);
1618         errno = p->error;
1619         return p->u.fcntl.ret;
1620 }
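
/*
 * Illustrative sketch (editorial addition): in the child run where this
 * call is chosen to fail, a lock request comes back exactly as if another
 * process held a conflicting lock:
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 0 };
 *	fcntl(fd, F_SETLK, &fl);	returns -1, errno == EAGAIN
 *	fcntl(fd, F_SETLKW, &fl);	returns -1, errno == EDEADLK
 *
 * On the non-failing path the real fcntl() runs and any granted range is
 * recorded via add_lock() above.
 */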
1621
1622 static void cleanup_lseek(struct lseek_call *call, bool restore)
1623 {
1624         if (restore) {
1625                 trace("cleaning up lseek on fd %i -> %llu\n",
1626                       call->fd, (long long)call->old_off);
1627                 if (lseek(call->fd, call->old_off, SEEK_SET) != call->old_off)
1628                         fwarn("Restoring lseek pointer failed");
1629         }
1630 }
1631
1632 /* We trap this so we can undo it: we don't fail it. */
1633 off_t failtest_lseek(int fd, off_t offset, int whence, const char *file,
1634                      unsigned int line)
1635 {
1636         struct failtest_call *p;
1637         struct lseek_call call;
1638         call.fd = fd;
1639         call.offset = offset;
1640         call.whence = whence;
1641         call.old_off = lseek(fd, 0, SEEK_CUR);
1642
1643         p = add_history(FAILTEST_LSEEK, false, file, line, &call);
1644         p->fail = false;
1645
1646         /* Consume lseek from failpath. */
1647         if (failpath)
1648                 if (should_fail(p))
1649                         abort();
1650
1651         p->u.lseek.ret = lseek(fd, offset, whence);
1652
1653         if (p->u.lseek.ret != (off_t)-1)
1654                 set_cleanup(p, cleanup_lseek, struct lseek_call);
1655
1656         trace("lseek %s:%u on fd %i from %llu to %llu%s\n",
1657               file, line, fd, (long long)call.old_off, (long long)offset,
1658               whence == SEEK_CUR ? " (from current off)" :
1659               whence == SEEK_END ? " (from end)" :
1660               whence == SEEK_SET ? "" : " (invalid whence)");
1661         return p->u.lseek.ret;
1662 }
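
/*
 * Illustrative sketch (editorial addition): the forked child shares open
 * file descriptions with the parent, so old_off is recorded to let a
 * discarded run be undone.  Roughly:
 *
 *	off_t before = lseek(fd, 0, SEEK_CUR);	say, offset 0
 *	lseek(fd, 100, SEEK_SET);		child moves the offset
 *	... the child run is abandoned ...
 *	cleanup_lseek() seeks back, so the parent still sees offset 0.
 */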
1663
1664
1665 pid_t failtest_getpid(const char *file, unsigned line)
1666 {
1667         /* You must call failtest_init first! */
1668         assert(orig_pid);
1669         return orig_pid;
1670 }
1671
1672 void failtest_init(int argc, char *argv[])
1673 {
1674         unsigned int i;
1675
1676         orig_pid = getpid();
1677
1678         warnfd = move_fd_to_high(dup(STDERR_FILENO));
1679         for (i = 1; i < argc; i++) {
1680                 if (!strncmp(argv[i], "--failpath=", strlen("--failpath="))) {
1681                         failpath = argv[i] + strlen("--failpath=");
1682                 } else if (strcmp(argv[i], "--trace") == 0) {
1683                         tracefd = warnfd;
1684                         failtest_timeout_ms = -1;
1685                 } else if (!strncmp(argv[i], "--debugpath=",
1686                                     strlen("--debugpath="))) {
1687                         debugpath = argv[i] + strlen("--debugpath=");
1688                 }
1689         }
1690         failtable_init(&failtable);
1691         start = time_now();
1692 }
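
/*
 * Usage sketch (editorial addition): a test program hands its arguments to
 * failtest_init() before doing anything it wants traced, e.g.:
 *
 *	int main(int argc, char *argv[])
 *	{
 *		failtest_init(argc, argv);
 *		... test body using the failtest_override.h wrappers ...
 *		failtest_exit(0);
 *	}
 *
 * Then "./mytest --trace" dumps each simulated failure to (a dup of) stderr
 * and effectively disables the timeout, while "--failpath=..." and
 * "--debugpath=..." replay a failure path, normally copied from the output
 * of an earlier failing run.
 */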
1693
1694 bool failtest_has_failed(void)
1695 {
1696         return control_fd != -1;
1697 }
1698
1699 void failtest_exit(int status)
1700 {
1701         trace("failtest_exit with status %i\n", status);
1702         if (failtest_exit_check) {
1703                 if (!failtest_exit_check(&history))
1704                         child_fail(NULL, 0, "failtest_exit_check failed\n");
1705         }
1706
1707         failtest_cleanup(false, status);
1708 }
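
/*
 * Illustrative sketch (editorial addition, hypothetical check): a test can
 * install failtest_exit_check to veto an otherwise successful run by
 * examining the call history, e.g. insisting that some write happened:
 *
 *	static bool saw_a_write(struct tlist_calls *history)
 *	{
 *		struct failtest_call *c;
 *		tlist_for_each(history, c, list)
 *			if (c->type == FAILTEST_WRITE)
 *				return true;
 *		return false;
 *	}
 *	...
 *	failtest_exit_check = saw_a_write;
 *
 * Returning false makes failtest_exit() report the run as a failure.
 */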