/* Licensed under LGPL - see LICENSE file for details */
#include <ccan/failtest/failtest.h>
#include <stdarg.h>
#include <string.h>
#include <stdio.h>
#include <ctype.h>
#include <err.h>
#include <unistd.h>
#include <poll.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <signal.h>
#include <assert.h>
#include <ccan/time/time.h>
#include <ccan/read_write_all/read_write_all.h>
#include <ccan/failtest/failtest_proto.h>
#include <ccan/build_assert/build_assert.h>
#include <ccan/hash/hash.h>
#include <ccan/htable/htable_type.h>
#include <ccan/str/str.h>

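/* Optional user hook: called with the call history before we consider
 * failing a call; it may return FAIL_OK, FAIL_PROBE or FAIL_DONT_FAIL
 * (see should_fail() below). */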
enum failtest_result (*failtest_hook)(struct tlist_calls *);

static int tracefd = -1;
static int warnfd;

unsigned int failtest_timeout_ms = 20000;

const char *failpath;
const char *debugpath;

enum info_type {
        WRITE,
        RELEASE_LOCKS,
        FAILURE,
        SUCCESS,
        UNEXPECTED
};

struct lock_info {
        int fd;
        /* end is inclusive: you can't have a 0-byte lock. */
        off_t start, end;
        int type;
};

/* We hash the call location together with its backtrace. */
static size_t hash_call(const struct failtest_call *call)
{
        return hash(call->file, strlen(call->file),
                    hash(&call->line, 1,
                         hash(call->backtrace, call->backtrace_num,
                              call->type)));
}

static bool call_eq(const struct failtest_call *call1,
                    const struct failtest_call *call2)
{
        unsigned int i;

        if (strcmp(call1->file, call2->file) != 0
            || call1->line != call2->line
            || call1->type != call2->type
            || call1->backtrace_num != call2->backtrace_num)
                return false;

        for (i = 0; i < call1->backtrace_num; i++)
                if (call1->backtrace[i] != call2->backtrace[i])
                        return false;

        return true;
}

/* Defines struct failtable. */
HTABLE_DEFINE_TYPE(struct failtest_call, (struct failtest_call *), hash_call,
                   call_eq, failtable);

bool (*failtest_exit_check)(struct tlist_calls *history);

static struct tlist_calls history = TLIST_INIT(history);
static int control_fd = -1;
static struct timeval start;
static bool probing = false;
static struct failtable failtable;

static struct write_call *child_writes = NULL;
static unsigned int child_writes_num = 0;

static pid_t lock_owner;
static struct lock_info *locks = NULL;
static unsigned int lock_num = 0;

static pid_t orig_pid;

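/* One character per failtest_call_type, used to build --failpath strings:
 * lower case means the call succeeded, upper case means it was failed
 * (see failpath_string() and should_fail()). */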
static const char info_to_arg[] = "mceoxprwfa";

/* Dummy call used for failtest_undo wrappers. */
static struct failtest_call unrecorded_call;

#if HAVE_BACKTRACE
#include <execinfo.h>

static void **get_backtrace(unsigned int *num)
{
        static unsigned int max_back = 100;
        void **ret;

again:
        ret = malloc(max_back * sizeof(void *));
        *num = backtrace(ret, max_back);
        if (*num == max_back) {
                free(ret);
                max_back *= 2;
                goto again;
        }
        return ret;
}
#else
/* This will test slightly less, since it will consider all of the same
 * calls as identical.  But it's slightly faster! */
static void **get_backtrace(unsigned int *num)
{
        *num = 0;
        return NULL;
}
#endif /* HAVE_BACKTRACE */

static struct failtest_call *add_history_(enum failtest_call_type type,
                                          const char *file,
                                          unsigned int line,
                                          const void *elem,
                                          size_t elem_size)
{
        struct failtest_call *call;

        /* NULL file is how we suppress failure. */
        if (!file)
                return &unrecorded_call;

        call = malloc(sizeof *call);
        call->type = type;
        call->file = file;
        call->line = line;
        call->cleanup = NULL;
        call->backtrace = get_backtrace(&call->backtrace_num);
        memcpy(&call->u, elem, elem_size);
        tlist_add_tail(&history, call, list);
        return call;
}

#define add_history(type, file, line, elem) \
        add_history_((type), (file), (line), (elem), sizeof(*(elem)))

/* We do a fake call inside a sizeof(), to check types. */
#define set_cleanup(call, clean, type)                  \
        (call)->cleanup = (void *)((void)sizeof(clean((type *)NULL),1), (clean))


/* Dup the fd to a high value (out of the way I hope!), and close the old fd. */
static int move_fd_to_high(int fd)
{
        int i;

        for (i = FD_SETSIZE - 1; i >= 0; i--) {
                if (fcntl(i, F_GETFL) == -1 && errno == EBADF) {
                        if (dup2(fd, i) == -1)
                                err(1, "Failed to dup fd %i to %i", fd, i);
                        close(fd);
                        return i;
                }
        }
        /* Nothing?  Really?  Er... ok? */
        return fd;
}

static bool read_write_info(int fd)
{
        struct write_call *w;
        char *buf;

        /* We don't need all of this, but it's simple. */
        child_writes = realloc(child_writes,
                               (child_writes_num+1) * sizeof(child_writes[0]));
        w = &child_writes[child_writes_num];
        if (!read_all(fd, w, sizeof(*w)))
                return false;

        w->buf = buf = malloc(w->count);
        if (!read_all(fd, buf, w->count))
                return false;

        child_writes_num++;
        return true;
}

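/* Render the current history as a --failpath string: one character per
 * recorded call, upper-cased if that call was failed. */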
static char *failpath_string(void)
{
        struct failtest_call *i;
        char *ret = strdup("");
        unsigned len = 0;

        /* Inefficient, but who cares? */
        tlist_for_each(&history, i, list) {
                ret = realloc(ret, len + 2);
                ret[len] = info_to_arg[i->type];
                if (i->fail)
                        ret[len] = toupper(ret[len]);
                ret[++len] = '\0';
        }
        return ret;
}

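/* Warnings go to warnfd, a dup of the original stderr moved to a high fd
 * in failtest_init(), so they stay visible even after a child has
 * redirected stderr into the output pipe. */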
static void warn_via_fd(int e, const char *fmt, va_list ap)
{
        char *p = failpath_string();

        vdprintf(warnfd, fmt, ap);
        if (e != -1)
                dprintf(warnfd, ": %s", strerror(e));
        dprintf(warnfd, " [%s]\n", p);
        free(p);
}

static void fwarn(const char *fmt, ...)
{
        va_list ap;
        int e = errno;

        va_start(ap, fmt);
        warn_via_fd(e, fmt, ap);
        va_end(ap);
}


static void fwarnx(const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        warn_via_fd(-1, fmt, ap);
        va_end(ap);
}

static void tell_parent(enum info_type type)
{
        if (control_fd != -1)
                write_all(control_fd, &type, sizeof(type));
}

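/* Report a failing child: print the message and the child's captured
 * output, show the --failpath needed to reproduce it, tell our parent and
 * exit non-zero. */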
static void child_fail(const char *out, size_t outlen, const char *fmt, ...)
{
        va_list ap;
        char *path = failpath_string();

        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);

        fprintf(stderr, "%.*s", (int)outlen, out);
        printf("To reproduce: --failpath=%s\n", path);
        free(path);
        tell_parent(FAILURE);
        exit(1);
}

static void trace(const char *fmt, ...)
{
        va_list ap;

        if (tracefd == -1)
                return;

        va_start(ap, fmt);
        vdprintf(tracefd, fmt, ap);
        va_end(ap);
}

static pid_t child;

static void hand_down(int signum)
{
        kill(child, signum);
}

static void release_locks(void)
{
        /* Locks were never acquired/reacquired? */
        if (lock_owner == 0)
                return;

        /* We own them?  Release them all. */
        if (lock_owner == getpid()) {
                unsigned int i;
                struct flock fl;
                fl.l_type = F_UNLCK;
                fl.l_whence = SEEK_SET;
                fl.l_start = 0;
                fl.l_len = 0;

                for (i = 0; i < lock_num; i++)
                        fcntl(locks[i].fd, F_SETLK, &fl);
        } else {
                /* Our parent must have them; pass request up. */
                enum info_type type = RELEASE_LOCKS;
                assert(control_fd != -1);
                write_all(control_fd, &type, sizeof(type));
        }
        lock_owner = 0;
}

/* off_t is a signed type.  Getting its max is non-trivial. */
static off_t off_max(void)
{
        BUILD_ASSERT(sizeof(off_t) == 4 || sizeof(off_t) == 8);
        if (sizeof(off_t) == 4)
                return (off_t)0x7FFFFFFF;
        else
                return (off_t)0x7FFFFFFFFFFFFFFFULL;
}

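/* (Re-)acquire every recorded lock in this process.  If the locks are
 * currently held elsewhere (normally by our parent), ask up the control
 * pipe for them to be released first. */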
static void get_locks(void)
{
        unsigned int i;
        struct flock fl;

        if (lock_owner == getpid())
                return;

        if (lock_owner != 0) {
                enum info_type type = RELEASE_LOCKS;
                assert(control_fd != -1);
                write_all(control_fd, &type, sizeof(type));
        }

        fl.l_whence = SEEK_SET;

        for (i = 0; i < lock_num; i++) {
                fl.l_type = locks[i].type;
                fl.l_start = locks[i].start;
                if (locks[i].end == off_max())
                        fl.l_len = 0;
                else
                        fl.l_len = locks[i].end - locks[i].start + 1;

                if (fcntl(locks[i].fd, F_SETLKW, &fl) != 0)
                        abort();
        }
        lock_owner = getpid();
}

struct saved_file {
        struct saved_file *next;
        int fd;
        void *contents;
        off_t off, len;
};

static struct saved_file *save_file(struct saved_file *next, int fd)
{
        struct saved_file *s = malloc(sizeof(*s));

        s->next = next;
        s->fd = fd;
        s->off = lseek(fd, 0, SEEK_CUR);
        /* Special file?  Erk... */
        assert(s->off != -1);
        s->len = lseek(fd, 0, SEEK_END);
        lseek(fd, 0, SEEK_SET);
        s->contents = malloc(s->len);
        if (read(fd, s->contents, s->len) != s->len)
                err(1, "Failed to save %zu bytes", (size_t)s->len);
        lseek(fd, s->off, SEEK_SET);
        return s;
}

/* We have little choice but to save and restore open files: mmap means we
 * can't really intercept changes in the child.
 *
 * We could do non-mmap'ed files on demand, however. */
static struct saved_file *save_files(void)
{
        struct saved_file *files = NULL;
        struct failtest_call *i;

        /* Figure out the set of live fds. */
        tlist_for_each_rev(&history, i, list) {
                if (i->type == FAILTEST_OPEN) {
                        int fd = i->u.open.ret;
                        /* Only do successful, writable fds. */
                        if (fd < 0)
                                continue;

                        /* If it was closed, cleanup == NULL. */
                        if (!i->cleanup)
                                continue;

                        if ((i->u.open.flags & O_RDWR) == O_RDWR) {
                                files = save_file(files, fd);
                        } else if ((i->u.open.flags & O_WRONLY)
                                   == O_WRONLY) {
                                /* FIXME: Handle O_WRONLY.  Open with O_RDWR? */
                                abort();
                        }
                }
        }

        return files;
}

static void restore_files(struct saved_file *s)
{
        while (s) {
                struct saved_file *next = s->next;

                lseek(s->fd, 0, SEEK_SET);
                if (write(s->fd, s->contents, s->len) != s->len)
                        err(1, "Failed to restore %zu bytes", (size_t)s->len);
                if (ftruncate(s->fd, s->len) != 0)
                        err(1, "Failed to trim file to length %zu",
                            (size_t)s->len);
                free(s->contents);
                lseek(s->fd, s->off, SEEK_SET);
                free(s);
                s = next;
        }
}

static void free_files(struct saved_file *s)
{
        while (s) {
                struct saved_file *next = s->next;
                free(s->contents);
                free(s);
                s = next;
        }
}

static void free_call(struct failtest_call *call)
{
        /* We don't do this in cleanup: needed even for failed opens. */
        if (call->type == FAILTEST_OPEN)
                free((char *)call->u.open.pathname);
        free(call->backtrace);
        tlist_del_from(&history, call, list);
        free(call);
}

/* Free up memory, so valgrind doesn't report leaks. */
static void free_everything(void)
{
        struct failtest_call *i;

        while ((i = tlist_top(&history, struct failtest_call, list)) != NULL)
                free_call(i);

        failtable_clear(&failtable);
}

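/* Run any outstanding cleanups in reverse order.  Unless this is a forced
 * cleanup, an un-cleaned call is reported as a leak (with the --failpath
 * to reproduce it).  Finally tell our parent we succeeded and exit. */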
static NORETURN void failtest_cleanup(bool forced_cleanup, int status)
{
        struct failtest_call *i;

        /* For children, we don't care if they "failed" the testing. */
        if (control_fd != -1)
                status = 0;

        if (forced_cleanup) {
                /* We didn't actually do final operation: remove it. */
                i = tlist_tail(&history, struct failtest_call, list);
                free_call(i);
        }

        /* Cleanup everything, in reverse order. */
        tlist_for_each_rev(&history, i, list) {
                if (!i->cleanup)
                        continue;
                if (!forced_cleanup) {
                        printf("Leak at %s:%u: --failpath=%s\n",
                               i->file, i->line, failpath_string());
                        status = 1;
                }
                i->cleanup(&i->u);
        }

        free_everything();
        tell_parent(SUCCESS);
        exit(status);
}

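/* The heart of failtest: decide whether this call should fail.  A
 * --failpath answers directly; otherwise (unless we are probing, have
 * already failed at this spot, or the hook says FAIL_DONT_FAIL) we fork.
 * The child returns true and runs on with the failure injected; the
 * parent captures the child's output and control messages, waits for it,
 * then returns false and carries on without failing. */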
static bool should_fail(struct failtest_call *call)
{
        int status;
        int control[2], output[2];
        enum info_type type = UNEXPECTED;
        char *out = NULL;
        size_t outlen = 0;
        struct saved_file *files;
        struct failtest_call *dup;

        if (call == &unrecorded_call)
                return false;

        if (failpath) {
                /* + means continue after end, like normal. */
                if (*failpath == '+')
                        failpath = NULL;
                else if (*failpath == '\0') {
                        /* Continue, but don't inject errors. */
                        return call->fail = false;
                } else {
                        if (tolower((unsigned char)*failpath)
                            != info_to_arg[call->type])
                                errx(1, "Failpath expected '%c' got '%c'\n",
                                     info_to_arg[call->type], *failpath);
                        call->fail = cisupper(*(failpath++));
                        return call->fail;
                }
        }

        /* Attach debugger if they asked for it. */
        if (debugpath) {
                char *path;

                /* Pretend this last call matches whatever path wanted:
                 * keeps valgrind happy. */
                call->fail = cisupper(debugpath[strlen(debugpath)-1]);
                path = failpath_string();

                if (streq(path, debugpath)) {
                        char str[80];

                        /* Don't timeout. */
                        signal(SIGUSR1, SIG_IGN);
                        sprintf(str, "xterm -e gdb /proc/%d/exe %d &",
                                getpid(), getpid());
                        if (system(str) == 0)
                                sleep(5);
                } else {
                        /* Ignore last character: could be upper or lower. */
                        path[strlen(path)-1] = '\0';
                        if (!strstarts(debugpath, path)) {
                                fprintf(stderr,
                                        "--debugpath not followed: %s\n", path);
                                debugpath = NULL;
                        }
                }
                free(path);
        }

        /* Are we probing?  If so, we never fail twice. */
        if (probing)
                return call->fail = false;

        /* Don't fail more than once in the same place. */
        dup = failtable_get(&failtable, call);
        if (dup)
                return call->fail = false;

        if (failtest_hook) {
                switch (failtest_hook(&history)) {
                case FAIL_OK:
                        break;
                case FAIL_PROBE:
                        probing = true;
                        break;
                case FAIL_DONT_FAIL:
                        call->fail = false;
                        return false;
                default:
                        abort();
                }
        }

        /* Add it to our table of calls. */
        failtable_add(&failtable, call);

        files = save_files();

        /* We're going to fail in the child. */
        call->fail = true;
        if (pipe(control) != 0 || pipe(output) != 0)
                err(1, "opening pipe");

        /* Prevent double-printing (in child and parent) */
        fflush(stdout);
        child = fork();
        if (child == -1)
                err(1, "forking failed");

        if (child == 0) {
                if (tracefd != -1) {
                        struct timeval diff;
                        const char *p;
                        char *failpath;
                        struct failtest_call *c;

                        c = tlist_tail(&history, struct failtest_call, list);
                        diff = time_sub(time_now(), start);
                        failpath = failpath_string();
                        trace("%u->%u (%u.%02u): %s (", getppid(), getpid(),
                              (int)diff.tv_sec, (int)diff.tv_usec / 10000,
                              failpath);
                        free(failpath);
                        p = strrchr(c->file, '/');
                        if (p)
                                trace("%s", p+1);
                        else
                                trace("%s", c->file);
                        trace(":%u)\n", c->line);
                }
                close(control[0]);
                close(output[0]);
                dup2(output[1], STDOUT_FILENO);
                dup2(output[1], STDERR_FILENO);
                if (output[1] != STDOUT_FILENO && output[1] != STDERR_FILENO)
                        close(output[1]);
                control_fd = move_fd_to_high(control[1]);
                /* Valgrind spots the leak if we don't free these. */
                free_files(files);
                return true;
        }

        signal(SIGUSR1, hand_down);

        close(control[1]);
        close(output[1]);

        /* We grab output so we can display it; we grab writes so we
         * can compare. */
        do {
                struct pollfd pfd[2];
                int ret;

                pfd[0].fd = output[0];
                pfd[0].events = POLLIN|POLLHUP;
                pfd[1].fd = control[0];
                pfd[1].events = POLLIN|POLLHUP;

                if (type == SUCCESS)
                        ret = poll(pfd, 1, failtest_timeout_ms);
                else
                        ret = poll(pfd, 2, failtest_timeout_ms);

                if (ret == 0)
                        hand_down(SIGUSR1);
                if (ret < 0) {
                        if (errno == EINTR)
                                continue;
                        err(1, "Poll returned %i", ret);
                }

                if (pfd[0].revents & POLLIN) {
                        ssize_t len;

                        out = realloc(out, outlen + 8192);
                        len = read(output[0], out + outlen, 8192);
                        outlen += len;
                } else if (type != SUCCESS && (pfd[1].revents & POLLIN)) {
                        if (read_all(control[0], &type, sizeof(type))) {
                                if (type == WRITE) {
                                        if (!read_write_info(control[0]))
                                                break;
                                } else if (type == RELEASE_LOCKS) {
                                        release_locks();
                                        /* FIXME: Tell them we're done... */
                                }
                        }
                } else if (pfd[0].revents & POLLHUP) {
                        break;
                }
        } while (type != FAILURE);

        close(output[0]);
        close(control[0]);
        waitpid(child, &status, 0);
        if (!WIFEXITED(status)) {
                if (WTERMSIG(status) == SIGUSR1)
                        child_fail(out, outlen, "Timed out");
                else
                        child_fail(out, outlen, "Killed by signal %u: ",
                                   WTERMSIG(status));
        }
        /* Child printed failure already, just pass up exit code. */
        if (type == FAILURE) {
                fprintf(stderr, "%.*s", (int)outlen, out);
                tell_parent(type);
                exit(WEXITSTATUS(status) ? WEXITSTATUS(status) : 1);
        }
        if (WEXITSTATUS(status) != 0)
                child_fail(out, outlen, "Exited with status %i: ",
                           WEXITSTATUS(status));

        free(out);
        signal(SIGUSR1, SIG_DFL);

        restore_files(files);

        /* Only child does probe. */
        probing = false;

        /* We continue onwards without failing. */
        call->fail = false;
        return false;
}

static void cleanup_calloc(struct calloc_call *call)
{
        free(call->ret);
}

void *failtest_calloc(size_t nmemb, size_t size,
                      const char *file, unsigned line)
{
        struct failtest_call *p;
        struct calloc_call call;
        call.nmemb = nmemb;
        call.size = size;
        p = add_history(FAILTEST_CALLOC, file, line, &call);

        if (should_fail(p)) {
                p->u.calloc.ret = NULL;
                p->error = ENOMEM;
        } else {
                p->u.calloc.ret = calloc(nmemb, size);
                set_cleanup(p, cleanup_calloc, struct calloc_call);
        }
        errno = p->error;
        return p->u.calloc.ret;
}

static void cleanup_malloc(struct malloc_call *call)
{
        free(call->ret);
}

void *failtest_malloc(size_t size, const char *file, unsigned line)
{
        struct failtest_call *p;
        struct malloc_call call;
        call.size = size;

        p = add_history(FAILTEST_MALLOC, file, line, &call);
        if (should_fail(p)) {
                p->u.malloc.ret = NULL;
                p->error = ENOMEM;
        } else {
                p->u.malloc.ret = malloc(size);
                set_cleanup(p, cleanup_malloc, struct malloc_call);
        }
        errno = p->error;
        return p->u.malloc.ret;
}

static void cleanup_realloc(struct realloc_call *call)
{
        free(call->ret);
}

/* Walk back and find out if we got this ptr from a previous routine. */
static void fixup_ptr_history(void *ptr)
{
        struct failtest_call *i;

        /* Start at end of history, work back. */
        tlist_for_each_rev(&history, i, list) {
                switch (i->type) {
                case FAILTEST_REALLOC:
                        if (i->u.realloc.ret == ptr) {
                                i->cleanup = NULL;
                                return;
                        }
                        break;
                case FAILTEST_MALLOC:
                        if (i->u.malloc.ret == ptr) {
                                i->cleanup = NULL;
                                return;
                        }
                        break;
                case FAILTEST_CALLOC:
                        if (i->u.calloc.ret == ptr) {
                                i->cleanup = NULL;
                                return;
                        }
                        break;
                default:
                        break;
                }
        }
}

void *failtest_realloc(void *ptr, size_t size, const char *file, unsigned line)
{
        struct failtest_call *p;
        struct realloc_call call;
        call.size = size;
        p = add_history(FAILTEST_REALLOC, file, line, &call);

        /* FIXME: Try one child moving allocation, one not. */
        if (should_fail(p)) {
                p->u.realloc.ret = NULL;
                p->error = ENOMEM;
        } else {
                /* Don't catch this one in the history fixup... */
                p->u.realloc.ret = NULL;
                fixup_ptr_history(ptr);
                p->u.realloc.ret = realloc(ptr, size);
                set_cleanup(p, cleanup_realloc, struct realloc_call);
        }
        errno = p->error;
        return p->u.realloc.ret;
}

void failtest_free(void *ptr)
{
        fixup_ptr_history(ptr);
        free(ptr);
}

static void cleanup_open(struct open_call *call)
{
        close(call->ret);
}

int failtest_open(const char *pathname,
                  const char *file, unsigned line, ...)
{
        struct failtest_call *p;
        struct open_call call;
        va_list ap;

        call.pathname = strdup(pathname);
        call.mode = 0;
        va_start(ap, line);
        call.flags = va_arg(ap, int);
        if (call.flags & O_CREAT)
                call.mode = va_arg(ap, int);
        va_end(ap);
        p = add_history(FAILTEST_OPEN, file, line, &call);
        /* Avoid memory leak! */
        if (p == &unrecorded_call)
                free((char *)call.pathname);
        p->u.open.ret = open(pathname, call.flags, call.mode);

        if (p->u.open.ret == -1) {
                p->fail = false;
                p->error = errno;
        } else if (should_fail(p)) {
                close(p->u.open.ret);
                p->u.open.ret = -1;
                /* FIXME: Play with error codes? */
                p->error = EACCES;
        } else {
                set_cleanup(p, cleanup_open, struct open_call);
        }
        errno = p->error;
        return p->u.open.ret;
}

void *failtest_mmap(void *addr, size_t length, int prot, int flags,
                    int fd, off_t offset, const char *file, unsigned line)
{
        struct failtest_call *p;
        struct mmap_call call;

        call.addr = addr;
        call.length = length;
        call.prot = prot;
        call.flags = flags;
        call.offset = offset;
        call.fd = fd;

        p = add_history(FAILTEST_MMAP, file, line, &call);
        if (should_fail(p)) {
                p->u.mmap.ret = MAP_FAILED;
                p->error = ENOMEM;
        } else {
                p->u.mmap.ret = mmap(addr, length, prot, flags, fd, offset);
        }
        errno = p->error;
        return p->u.mmap.ret;
}

static void cleanup_pipe(struct pipe_call *call)
{
        if (!call->closed[0])
                close(call->fds[0]);
        if (!call->closed[1])
                close(call->fds[1]);
}

int failtest_pipe(int pipefd[2], const char *file, unsigned line)
{
        struct failtest_call *p;
        struct pipe_call call;

        p = add_history(FAILTEST_PIPE, file, line, &call);
        if (should_fail(p)) {
                p->u.pipe.ret = -1;
                /* FIXME: Play with error codes? */
                p->error = EMFILE;
        } else {
                p->u.pipe.ret = pipe(p->u.pipe.fds);
                p->u.pipe.closed[0] = p->u.pipe.closed[1] = false;
                set_cleanup(p, cleanup_pipe, struct pipe_call);
        }
        /* This causes valgrind to notice if they use pipefd[] after failure */
        memcpy(pipefd, p->u.pipe.fds, sizeof(p->u.pipe.fds));
        errno = p->error;
        return p->u.pipe.ret;
}

ssize_t failtest_pread(int fd, void *buf, size_t count, off_t off,
                       const char *file, unsigned line)
{
        struct failtest_call *p;
        struct read_call call;
        call.fd = fd;
        call.buf = buf;
        call.count = count;
        call.off = off;
        p = add_history(FAILTEST_READ, file, line, &call);

        /* FIXME: Try partial read returns. */
        if (should_fail(p)) {
                p->u.read.ret = -1;
                p->error = EIO;
        } else {
                p->u.read.ret = pread(fd, buf, count, off);
        }
        errno = p->error;
        return p->u.read.ret;
}

ssize_t failtest_pwrite(int fd, const void *buf, size_t count, off_t off,
                        const char *file, unsigned line)
{
        struct failtest_call *p;
        struct write_call call;

        call.fd = fd;
        call.buf = buf;
        call.count = count;
        call.off = off;
        p = add_history(FAILTEST_WRITE, file, line, &call);

        /* If we're a child, we need to make sure we write the same thing
         * to non-files as the parent does, so tell it. */
        if (control_fd != -1 && off == (off_t)-1) {
                enum info_type type = WRITE;

                write_all(control_fd, &type, sizeof(type));
                write_all(control_fd, &p->u.write, sizeof(p->u.write));
                write_all(control_fd, buf, count);
        }

        /* FIXME: Try partial write returns. */
        if (should_fail(p)) {
                p->u.write.ret = -1;
                p->error = EIO;
        } else {
                /* FIXME: We assume same write order in parent and child */
                if (off == (off_t)-1 && child_writes_num != 0) {
                        if (child_writes[0].fd != fd)
                                errx(1, "Child wrote to fd %u, not %u?",
                                     child_writes[0].fd, fd);
                        if (child_writes[0].off != p->u.write.off)
                                errx(1, "Child wrote to offset %zu, not %zu?",
                                     (size_t)child_writes[0].off,
                                     (size_t)p->u.write.off);
                        if (child_writes[0].count != count)
                                errx(1, "Child wrote length %zu, not %zu?",
                                     child_writes[0].count, count);
                        if (memcmp(child_writes[0].buf, buf, count)) {
                                child_fail(NULL, 0,
                                           "Child wrote differently to"
                                           " fd %u than we did!\n", fd);
                        }
                        free((char *)child_writes[0].buf);
                        child_writes_num--;
                        memmove(&child_writes[0], &child_writes[1],
                                sizeof(child_writes[0]) * child_writes_num);

                        /* If this is a socket or pipe, the child already
                           wrote it. */
                        if (p->u.write.off == (off_t)-1) {
                                p->u.write.ret = count;
                                errno = p->error;
                                return p->u.write.ret;
                        }
                }
                p->u.write.ret = pwrite(fd, buf, count, off);
        }
        errno = p->error;
        return p->u.write.ret;
}

ssize_t failtest_read(int fd, void *buf, size_t count,
                      const char *file, unsigned line)
{
        return failtest_pread(fd, buf, count, lseek(fd, 0, SEEK_CUR),
                              file, line);
}

ssize_t failtest_write(int fd, const void *buf, size_t count,
                       const char *file, unsigned line)
{
        return failtest_pwrite(fd, buf, count, lseek(fd, 0, SEEK_CUR),
                               file, line);
}

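/* Maintain the table of byte-range locks the program holds, merging and
 * trimming overlapping ranges on the same fd, so get_locks() can
 * re-acquire them when lock ownership moves between parent and child. */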
static struct lock_info *WARN_UNUSED_RESULT
add_lock(struct lock_info *locks, int fd, off_t start, off_t end, int type)
{
        unsigned int i;
        struct lock_info *l;

        for (i = 0; i < lock_num; i++) {
                l = &locks[i];

                if (l->fd != fd)
                        continue;
                /* Four cases we care about:
                 * Start overlap:
                 *      l =    |      |
                 *      new = |   |
                 * Mid overlap:
                 *      l =    |      |
                 *      new =    |  |
                 * End overlap:
                 *      l =    |      |
                 *      new =      |    |
                 * Total overlap:
                 *      l =    |      |
                 *      new = |         |
                 */
                if (start > l->start && end < l->end) {
                        /* Mid overlap: trim entry, add new one. */
                        off_t new_start, new_end;
                        new_start = end + 1;
                        new_end = l->end;
                        l->end = start - 1;
                        locks = add_lock(locks,
                                         fd, new_start, new_end, l->type);
                        l = &locks[i];
                } else if (start <= l->start && end >= l->end) {
                        /* Total overlap: eliminate entry. */
                        l->end = 0;
                        l->start = 1;
                } else if (end >= l->start && end < l->end) {
                        /* Start overlap: trim entry. */
                        l->start = end + 1;
                } else if (start > l->start && start <= l->end) {
                        /* End overlap: trim entry. */
                        l->end = start-1;
                }
                /* Nothing left?  Remove it. */
                if (l->end < l->start) {
                        memmove(l, l + 1, (--lock_num - i) * sizeof(l[0]));
                        i--;
                }
        }

        if (type != F_UNLCK) {
                locks = realloc(locks, (lock_num + 1) * sizeof(*locks));
                l = &locks[lock_num++];
                l->fd = fd;
                l->start = start;
                l->end = end;
                l->type = type;
        }
        return locks;
}

/* We trap this so we can record it: we don't fail it. */
int failtest_close(int fd, const char *file, unsigned line)
{
        struct failtest_call *i;
        struct close_call call;
        struct failtest_call *p;

        call.fd = fd;
        p = add_history(FAILTEST_CLOSE, file, line, &call);
        p->fail = false;

        /* Consume close from failpath. */
        if (failpath)
                if (should_fail(p))
                        abort();

        if (fd < 0)
                return close(fd);

        /* Trace history to find source of fd. */
        tlist_for_each_rev(&history, i, list) {
                switch (i->type) {
                case FAILTEST_PIPE:
                        /* From a pipe? */
                        if (i->u.pipe.fds[0] == fd) {
                                assert(!i->u.pipe.closed[0]);
                                i->u.pipe.closed[0] = true;
                                if (i->u.pipe.closed[1])
                                        i->cleanup = NULL;
                                goto out;
                        }
                        if (i->u.pipe.fds[1] == fd) {
                                assert(!i->u.pipe.closed[1]);
                                i->u.pipe.closed[1] = true;
                                if (i->u.pipe.closed[0])
                                        i->cleanup = NULL;
                                goto out;
                        }
                        break;
                case FAILTEST_OPEN:
                        if (i->u.open.ret == fd) {
                                assert((void *)i->cleanup
                                       == (void *)cleanup_open);
                                i->cleanup = NULL;
                                goto out;
                        }
                        break;
                default:
                        break;
                }
        }

out:
        locks = add_lock(locks, fd, 0, off_max(), F_UNLCK);
        return close(fd);
}

/* Zero length means "to end of file" */
static off_t end_of(off_t start, off_t len)
{
        if (len == 0)
                return off_max();
        return start + len - 1;
}

/* FIXME: This only handles locks, really. */
int failtest_fcntl(int fd, const char *file, unsigned line, int cmd, ...)
{
        struct failtest_call *p;
        struct fcntl_call call;
        va_list ap;

        call.fd = fd;
        call.cmd = cmd;

        /* Argument extraction. */
        switch (cmd) {
        case F_SETFL:
        case F_SETFD:
                va_start(ap, cmd);
                call.arg.l = va_arg(ap, long);
                va_end(ap);
                return fcntl(fd, cmd, call.arg.l);
        case F_GETFD:
        case F_GETFL:
                return fcntl(fd, cmd);
        case F_GETLK:
                get_locks();
                va_start(ap, cmd);
                call.arg.fl = *va_arg(ap, struct flock *);
                va_end(ap);
                return fcntl(fd, cmd, &call.arg.fl);
        case F_SETLK:
        case F_SETLKW:
                va_start(ap, cmd);
                call.arg.fl = *va_arg(ap, struct flock *);
                va_end(ap);
                break;
        default:
                /* This means you need to implement it here. */
                err(1, "failtest: unknown fcntl %u", cmd);
        }

        p = add_history(FAILTEST_FCNTL, file, line, &call);

        if (should_fail(p)) {
                p->u.fcntl.ret = -1;
                if (p->u.fcntl.cmd == F_SETLK)
                        p->error = EAGAIN;
                else
                        p->error = EDEADLK;
        } else {
                get_locks();
                p->u.fcntl.ret = fcntl(p->u.fcntl.fd, p->u.fcntl.cmd,
                                       &p->u.fcntl.arg.fl);
                if (p->u.fcntl.ret == -1)
                        p->error = errno;
                else {
                        /* We don't handle anything else yet. */
                        assert(p->u.fcntl.arg.fl.l_whence == SEEK_SET);
                        locks = add_lock(locks,
                                         p->u.fcntl.fd,
                                         p->u.fcntl.arg.fl.l_start,
                                         end_of(p->u.fcntl.arg.fl.l_start,
                                                p->u.fcntl.arg.fl.l_len),
                                         p->u.fcntl.arg.fl.l_type);
                }
        }
        errno = p->error;
        return p->u.fcntl.ret;
}

pid_t failtest_getpid(const char *file, unsigned line)
{
        /* You must call failtest_init first! */
        assert(orig_pid);
        return orig_pid;
}

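/*
 * A rough usage sketch (an assumed test layout; how malloc/open/etc. get
 * redirected to the failtest_* wrappers is done elsewhere, via failtest's
 * override header, and is not shown here):
 *
 *      int main(int argc, char *argv[])
 *      {
 *              failtest_init(argc, argv);
 *              // ... exercise the code under test ...
 *              failtest_exit(0);
 *      }
 *
 * failtest_init() recognizes --failpath=, --tracepath and --debugpath=
 * in argv, and records the original pid and start time.
 */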
void failtest_init(int argc, char *argv[])
{
        unsigned int i;

        orig_pid = getpid();

        warnfd = move_fd_to_high(dup(STDERR_FILENO));
        for (i = 1; i < argc; i++) {
                if (!strncmp(argv[i], "--failpath=", strlen("--failpath="))) {
                        failpath = argv[i] + strlen("--failpath=");
                } else if (strcmp(argv[i], "--tracepath") == 0) {
                        tracefd = warnfd;
                        failtest_timeout_ms = -1;
                } else if (!strncmp(argv[i], "--debugpath=",
                                    strlen("--debugpath="))) {
                        debugpath = argv[i] + strlen("--debugpath=");
                }
        }
        failtable_init(&failtable);
        start = time_now();
}

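/* True if we are a child currently running with a failure injected
 * (i.e. we have a control pipe back to a parent). */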
bool failtest_has_failed(void)
{
        return control_fd != -1;
}

void failtest_exit(int status)
{
        if (failtest_exit_check) {
                if (!failtest_exit_check(&history))
                        child_fail(NULL, 0, "failtest_exit_check failed\n");
        }

        failtest_cleanup(false, status);
}