ccan/failtest/failtest.c
1 /* Licensed under LGPL - see LICENSE file for details */
2 #include <ccan/failtest/failtest.h>
3 #include <stdarg.h>
4 #include <string.h>
5 #include <stdio.h>
6 #include <stdarg.h>
7 #include <ctype.h>
8 #include <err.h>
9 #include <unistd.h>
10 #include <poll.h>
11 #include <errno.h>
12 #include <sys/types.h>
13 #include <sys/wait.h>
14 #include <sys/stat.h>
15 #include <sys/time.h>
16 #include <sys/mman.h>
17 #include <signal.h>
18 #include <assert.h>
19 #include <ccan/time/time.h>
20 #include <ccan/read_write_all/read_write_all.h>
21 #include <ccan/failtest/failtest_proto.h>
22 #include <ccan/build_assert/build_assert.h>
23 #include <ccan/hash/hash.h>
24 #include <ccan/htable/htable_type.h>
25 #include <ccan/str/str.h>
26
27 enum failtest_result (*failtest_hook)(struct tlist_calls *);
28
29 static int tracefd = -1;
30 static int warnfd;
31
32 unsigned int failtest_timeout_ms = 20000;
33
34 const char *failpath;
35 const char *debugpath;
36
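/* Message types passed over the control pipe between a forked failure child
 * and its parent: the child reports writes to non-seekable fds (WRITE), asks
 * the parent to drop fcntl locks (RELEASE_LOCKS), and announces its final
 * result (FAILURE/SUCCESS); UNEXPECTED is the parent's initial "no news yet"
 * state in should_fail() below. */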
37 enum info_type {
38         WRITE,
39         RELEASE_LOCKS,
40         FAILURE,
41         SUCCESS,
42         UNEXPECTED
43 };
44
45 struct lock_info {
46         int fd;
47         /* end is inclusive: you can't have a 0-byte lock. */
48         off_t start, end;
49         int type;
50 };
51
52 /* We hash the call location together with its backtrace. */
53 static size_t hash_call(const struct failtest_call *call)
54 {
55         return hash(call->file, strlen(call->file),
56                     hash(&call->line, 1,
57                          hash(call->backtrace, call->backtrace_num,
58                               call->type)));
59 }
60
61 static bool call_eq(const struct failtest_call *call1,
62                     const struct failtest_call *call2)
63 {
64         unsigned int i;
65
66         if (strcmp(call1->file, call2->file) != 0
67             || call1->line != call2->line
68             || call1->type != call2->type
69             || call1->backtrace_num != call2->backtrace_num)
70                 return false;
71
72         for (i = 0; i < call1->backtrace_num; i++)
73                 if (call1->backtrace[i] != call2->backtrace[i])
74                         return false;
75
76         return true;
77 }
78
79 /* Defines struct failtable. */
80 HTABLE_DEFINE_TYPE(struct failtest_call, (struct failtest_call *), hash_call,
81                    call_eq, failtable);
82
83 bool (*failtest_exit_check)(struct tlist_calls *history);
84
85 static struct tlist_calls history = TLIST_INIT(history);
86 static int control_fd = -1;
87 static struct timeval start;
88 static bool probing = false;
89 static struct failtable failtable;
90
91 static struct write_call *child_writes = NULL;
92 static unsigned int child_writes_num = 0;
93
94 static pid_t lock_owner;
95 static struct lock_info *locks = NULL;
96 static unsigned int lock_num = 0;
97
98 static pid_t orig_pid;
99
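/* One letter per failtest_call_type, in enum order.  failpath_string() maps
 * each recorded call to its letter (upper-cased when that call was failed),
 * and should_fail() consumes the same letters when replaying a --failpath, so
 * a path of three letters ending in an upper-case one means "let two calls
 * succeed, then inject a failure at the third". */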
100 static const char info_to_arg[] = "mceoxprwfa";
101
102 /* Dummy call used for failtest_undo wrappers. */
103 static struct failtest_call unrecorded_call;
104
105 #if HAVE_BACKTRACE
106 #include <execinfo.h>
107
108 static void **get_backtrace(unsigned int *num)
109 {
110         static unsigned int max_back = 100;
111         void **ret;
112
113 again:
114         ret = malloc(max_back * sizeof(void *));
115         *num = backtrace(ret, max_back);
116         if (*num == max_back) {
117                 free(ret);
118                 max_back *= 2;
119                 goto again;
120         }
121         return ret;
122 }
123 #else
124 /* This will test slightly less, since it will consider all calls from the
125  * same location as identical.  But, it's slightly faster! */
126 static void **get_backtrace(unsigned int *num)
127 {
128         *num = 0;
129         return NULL;
130 }
131 #endif /* HAVE_BACKTRACE */
132
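/* Record one wrapped call in the global history list.  The failtest_*()
 * wrappers below add their call here, then ask should_fail() whether an
 * error should be injected for it; passing a NULL file suppresses recording
 * (and hence failure) entirely. */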
133 static struct failtest_call *add_history_(enum failtest_call_type type,
134                                           const char *file,
135                                           unsigned int line,
136                                           const void *elem,
137                                           size_t elem_size)
138 {
139         struct failtest_call *call;
140
141         /* NULL file is how we suppress failure. */
142         if (!file)
143                 return &unrecorded_call;
144
145         call = malloc(sizeof *call);
146         call->type = type;
147         call->file = file;
148         call->line = line;
149         call->cleanup = NULL;
            call->error = 0;        /* so "errno = p->error" is defined on success */
150         call->backtrace = get_backtrace(&call->backtrace_num);
151         memcpy(&call->u, elem, elem_size);
152         tlist_add_tail(&history, call, list);
153         return call;
154 }
155
156 #define add_history(type, file, line, elem) \
157         add_history_((type), (file), (line), (elem), sizeof(*(elem)))
158
159 /* We do a fake call inside a sizeof(), to check types. */
160 #define set_cleanup(call, clean, type)                  \
161         (call)->cleanup = (void *)((void)sizeof(clean((type *)NULL),1), (clean))
162
163
164 /* Dup the fd to a high value (out of the way I hope!), and close the old fd. */
165 static int move_fd_to_high(int fd)
166 {
167         int i;
168
169         for (i = FD_SETSIZE - 1; i >= 0; i--) {
170                 if (fcntl(i, F_GETFL) == -1 && errno == EBADF) {
171                         if (dup2(fd, i) == -1)
172                                 err(1, "Failed to dup fd %i to %i", fd, i);
173                         close(fd);
174                         return i;
175                 }
176         }
177         /* Nothing?  Really?  Er... ok? */
178         return fd;
179 }
180
181 static bool read_write_info(int fd)
182 {
183         struct write_call *w;
184         char *buf;
185
186         /* We don't need all of this, but it's simple. */
187         child_writes = realloc(child_writes,
188                                (child_writes_num+1) * sizeof(child_writes[0]));
189         w = &child_writes[child_writes_num];
190         if (!read_all(fd, w, sizeof(*w)))
191                 return false;
192
193         w->buf = buf = malloc(w->count);
194         if (!read_all(fd, buf, w->count))
195                 return false;
196
197         child_writes_num++;
198         return true;
199 }
200
201 static char *failpath_string(void)
202 {
203         struct failtest_call *i;
204         char *ret = strdup("");
205         unsigned len = 0;
206
207         /* Inefficient, but who cares? */
208         tlist_for_each(&history, i, list) {
209                 ret = realloc(ret, len + 2);
210                 ret[len] = info_to_arg[i->type];
211                 if (i->fail)
212                         ret[len] = toupper(ret[len]);
213                 ret[++len] = '\0';
214         }
215         return ret;
216 }
217
218 static void warn_via_fd(int e, const char *fmt, va_list ap)
219 {
220         char *p = failpath_string();
221
222         vdprintf(warnfd, fmt, ap);
223         if (e != -1)
224                 dprintf(warnfd, ": %s", strerror(e));
225         dprintf(warnfd, " [%s]\n", p);
226         free(p);
227 }
228
229 static void fwarn(const char *fmt, ...)
230 {
231         va_list ap;
232         int e = errno;
233
234         va_start(ap, fmt);
235         warn_via_fd(e, fmt, ap);
236         va_end(ap);
237 }
238
239
240 static void fwarnx(const char *fmt, ...)
241 {
242         va_list ap;
243
244         va_start(ap, fmt);
245         warn_via_fd(-1, fmt, ap);
246         va_end(ap);
247 }
248
249 static void tell_parent(enum info_type type)
250 {
251         if (control_fd != -1)
252                 write_all(control_fd, &type, sizeof(type));
253 }
254
255 static void child_fail(const char *out, size_t outlen, const char *fmt, ...)
256 {
257         va_list ap;
258         char *path = failpath_string();
259
260         va_start(ap, fmt);
261         vfprintf(stderr, fmt, ap);
262         va_end(ap);
263
264         fprintf(stderr, "%.*s", (int)outlen, out);
265         printf("To reproduce: --failpath=%s\n", path);
266         free(path);
267         tell_parent(FAILURE);
268         exit(1);
269 }
270
271 static void trace(const char *fmt, ...)
272 {
273         va_list ap;
274
275         if (tracefd == -1)
276                 return;
277
278         va_start(ap, fmt);
279         vdprintf(tracefd, fmt, ap);
280         va_end(ap);
281 }
282
283 static pid_t child;
284
285 static void hand_down(int signum)
286 {
287         kill(child, signum);
288 }
289
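/* fcntl() byte-range locks belong to a single process, so only one process in
 * the parent/child tree ("lock_owner") holds the real locks at any time.
 * release_locks()/get_locks() move that ownership around: a child that needs
 * the locks asks its parent to drop them via RELEASE_LOCKS, then reacquires
 * them itself from the recorded locks[] array. */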
290 static void release_locks(void)
291 {
292         /* Locks were never acquired/reacquired? */
293         if (lock_owner == 0)
294                 return;
295
296         /* We own them?  Release them all. */
297         if (lock_owner == getpid()) {
298                 unsigned int i;
299                 struct flock fl;
300                 fl.l_type = F_UNLCK;
301                 fl.l_whence = SEEK_SET;
302                 fl.l_start = 0;
303                 fl.l_len = 0;
304
305                 for (i = 0; i < lock_num; i++)
306                         fcntl(locks[i].fd, F_SETLK, &fl);
307         } else {
308                 /* Our parent must have them; pass request up. */
309                 enum info_type type = RELEASE_LOCKS;
310                 assert(control_fd != -1);
311                 write_all(control_fd, &type, sizeof(type));
312         }
313         lock_owner = 0;
314 }
315
316 /* off_t is a signed type.  Getting its max is non-trivial. */
317 static off_t off_max(void)
318 {
319         BUILD_ASSERT(sizeof(off_t) == 4 || sizeof(off_t) == 8);
320         if (sizeof(off_t) == 4)
321                 return (off_t)0x7FFFFFFF;
322         else
323                 return (off_t)0x7FFFFFFFFFFFFFFFULL;
324 }
325
326 static void get_locks(void)
327 {
328         unsigned int i;
329         struct flock fl;
330
331         if (lock_owner == getpid())
332                 return;
333
334         if (lock_owner != 0) {
335                 enum info_type type = RELEASE_LOCKS;
336                 assert(control_fd != -1);
337                 write_all(control_fd, &type, sizeof(type));
338         }
339
340         fl.l_whence = SEEK_SET;
341
342         for (i = 0; i < lock_num; i++) {
343                 fl.l_type = locks[i].type;
344                 fl.l_start = locks[i].start;
345                 if (locks[i].end == off_max())
346                         fl.l_len = 0;
347                 else
348                         fl.l_len = locks[i].end - locks[i].start + 1;
349
350                 if (fcntl(locks[i].fd, F_SETLKW, &fl) != 0)
351                         abort();
352         }
353         lock_owner = getpid();
354 }
355
356 struct saved_file {
357         struct saved_file *next;
358         int fd;
359         void *contents;
360         off_t off, len;
361 };
362
363 static struct saved_file *save_file(struct saved_file *next, int fd)
364 {
365         struct saved_file *s = malloc(sizeof(*s));
366
367         s->next = next;
368         s->fd = fd;
369         s->off = lseek(fd, 0, SEEK_CUR);
370         /* Special file?  Erk... */
371         assert(s->off != -1);
372         s->len = lseek(fd, 0, SEEK_END);
373         lseek(fd, 0, SEEK_SET);
374         s->contents = malloc(s->len);
375         if (read(fd, s->contents, s->len) != s->len)
376                 err(1, "Failed to save %zu bytes", (size_t)s->len);
377         lseek(fd, s->off, SEEK_SET);
378         return s;
379 }
380         
381 /* We have little choice but to save and restore open files: mmap means we
382  * can't really intercept changes in the child.
383  *
384  * We could do non-mmap'ed files on demand, however. */
385 static struct saved_file *save_files(void)
386 {
387         struct saved_file *files = NULL;
388         struct failtest_call *i;
389
390         /* Figure out the set of live fds. */
391         tlist_for_each_rev(&history, i, list) {
392                 if (i->type == FAILTEST_OPEN) {
393                         int fd = i->u.open.ret;
394                         /* Only do successful, writable fds. */
395                         if (fd < 0)
396                                 continue;
397
398                         /* If it was closed, cleanup == NULL. */
399                         if (!i->cleanup)
400                                 continue;
401
402                         if ((i->u.open.flags & O_RDWR) == O_RDWR) {
403                                 files = save_file(files, fd);
404                         } else if ((i->u.open.flags & O_WRONLY)
405                                    == O_WRONLY) {
406                                 /* FIXME: Handle O_WRONLY.  Open with O_RDWR? */
407                                 abort();
408                         }
409                 }
410         }
411
412         return files;
413 }
414
415 static void restore_files(struct saved_file *s)
416 {
417         while (s) {
418                 struct saved_file *next = s->next;
419
420                 lseek(s->fd, 0, SEEK_SET);
421                 if (write(s->fd, s->contents, s->len) != s->len)
422                         err(1, "Failed to restore %zu bytes", (size_t)s->len);
423                 if (ftruncate(s->fd, s->len) != 0)
424                         err(1, "Failed to trim file to length %zu",
425                             (size_t)s->len);
426                 free(s->contents);
427                 lseek(s->fd, s->off, SEEK_SET);
428                 free(s);
429                 s = next;
430         }
431 }
432
433 static void free_files(struct saved_file *s)
434 {
435         while (s) {
436                 struct saved_file *next = s->next;
437                 free(s->contents);
438                 free(s);
439                 s = next;
440         }
441 }
442
443 static void free_call(struct failtest_call *call)
444 {
445         /* We don't do this in cleanup: needed even for failed opens. */
446         if (call->type == FAILTEST_OPEN)
447                 free((char *)call->u.open.pathname);
448         free(call->backtrace);
449         tlist_del_from(&history, call, list);
450         free(call);
451 }
452
453 /* Free up memory, so valgrind doesn't report leaks. */
454 static void free_everything(void)
455 {
456         struct failtest_call *i;
457
458         while ((i = tlist_top(&history, struct failtest_call, list)) != NULL)
459                 free_call(i);
460
461         failtable_clear(&failtable);
462 }
463
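/* Clean up outstanding calls and exit.  On a normal (non-forced) exit, any
 * history entry that still has a cleanup function represents a resource the
 * test never released; it is reported as a leak together with a --failpath
 * that reproduces it, and the exit status becomes 1. */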
464 static NORETURN void failtest_cleanup(bool forced_cleanup, int status)
465 {
466         struct failtest_call *i;
467
468         /* For children, we don't care if they "failed" the testing. */
469         if (control_fd != -1)
470                 status = 0;
471
472         if (forced_cleanup) {
473                 /* We didn't actually do the final operation: remove it. */
474                 i = tlist_tail(&history, struct failtest_call, list);
475                 free_call(i);
476         }
477
478         /* Cleanup everything, in reverse order. */
479         tlist_for_each_rev(&history, i, list) {
480                 if (!i->cleanup)
481                         continue;
482                 if (!forced_cleanup) {
483                         printf("Leak at %s:%u: --failpath=%s\n",
484                                i->file, i->line, failpath_string());
485                         status = 1;
486                 }
487                 i->cleanup(&i->u);
488         }
489
490         free_everything();
491         if (status == 0)
492                 tell_parent(SUCCESS);
493         else
494                 tell_parent(FAILURE);
495         exit(status);
496 }
497
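/* The heart of failtest.  Called for every recorded call: decide whether to
 * inject a failure here.  Each distinct call site (by file, line, type and
 * backtrace, tracked in failtable) is failed at most once.  To try a failure
 * we fork: the child returns true and carries on with the error injected,
 * while the parent saves and later restores file contents, relays the child's
 * output and control messages, waits for its result, and then returns false
 * so the real run continues without failing. */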
498 static bool should_fail(struct failtest_call *call)
499 {
500         int status;
501         int control[2], output[2];
502         enum info_type type = UNEXPECTED;
503         char *out = NULL;
504         size_t outlen = 0;
505         struct saved_file *files;
506         struct failtest_call *dup;
507
508         if (call == &unrecorded_call)
509                 return false;
510
511         if (failpath) {
512                 /* + means continue after end, like normal. */
513                 if (*failpath == '+')
514                         failpath = NULL;
515                 else if (*failpath == '\0') {
516                         /* Continue, but don't inject errors. */
517                         return call->fail = false;
518                 } else {
519                         if (tolower((unsigned char)*failpath)
520                             != info_to_arg[call->type])
521                                 errx(1, "Failpath expected '%c' got '%c'\n",
522                                      info_to_arg[call->type], *failpath);
523                         call->fail = cisupper(*(failpath++));
524                         return call->fail;
525                 }
526         }
527
528         /* Attach debugger if they asked for it. */
529         if (debugpath) {
530                 char *path;
531
532                 /* Pretend this last call matches whatever the path wanted:
533                  * keeps valgrind happy. */
534                 call->fail = cisupper(debugpath[strlen(debugpath)-1]);
535                 path = failpath_string();
536
537                 if (streq(path, debugpath)) {
538                         char str[80];
539
540                         /* Don't timeout. */
541                         signal(SIGUSR1, SIG_IGN);
542                         sprintf(str, "xterm -e gdb /proc/%d/exe %d &",
543                                 getpid(), getpid());
544                         if (system(str) == 0)
545                                 sleep(5);
546                 } else {
547                         /* Ignore last character: could be upper or lower. */
548                         path[strlen(path)-1] = '\0';
549                         if (!strstarts(debugpath, path)) {
550                                 fprintf(stderr,
551                                         "--debugpath not followed: %s\n", path);
552                                 debugpath = NULL;
553                         }
554                 }
555                 free(path);
556         }
557
558         /* Are we probing?  If so, we never fail twice. */
559         if (probing)
560                 return call->fail = false;
561
562         /* Don't fail more than once in the same place. */
563         dup = failtable_get(&failtable, call);
564         if (dup)
565                 return call->fail = false;
566
567         if (failtest_hook) {
568                 switch (failtest_hook(&history)) {
569                 case FAIL_OK:
570                         break;
571                 case FAIL_PROBE:
572                         probing = true;
573                         break;
574                 case FAIL_DONT_FAIL:
575                         call->fail = false;
576                         return false;
577                 default:
578                         abort();
579                 }
580         }
581
582         /* Add it to our table of calls. */
583         failtable_add(&failtable, call);
584
585         files = save_files();
586
587         /* We're going to fail in the child. */
588         call->fail = true;
589         if (pipe(control) != 0 || pipe(output) != 0)
590                 err(1, "opening pipe");
591
592         /* Prevent double-printing (in child and parent) */
593         fflush(stdout);
594         child = fork();
595         if (child == -1)
596                 err(1, "forking failed");
597
598         if (child == 0) {
599                 if (tracefd != -1) {
600                         struct timeval diff;
601                         const char *p;
602                         char *failpath;
603                         struct failtest_call *c;
604
605                         c = tlist_tail(&history, struct failtest_call, list);
606                         diff = time_sub(time_now(), start);
607                         failpath = failpath_string();
608                         trace("%u->%u (%u.%02u): %s (", getppid(), getpid(),
609                               (int)diff.tv_sec, (int)diff.tv_usec / 10000,
610                               failpath);
611                         free(failpath);
612                         p = strrchr(c->file, '/');
613                         if (p)
614                                 trace("%s", p+1);
615                         else
616                                 trace("%s", c->file);
617                         trace(":%u)\n", c->line);
618                 }
619                 close(control[0]);
620                 close(output[0]);
621                 dup2(output[1], STDOUT_FILENO);
622                 dup2(output[1], STDERR_FILENO);
623                 if (output[1] != STDOUT_FILENO && output[1] != STDERR_FILENO)
624                         close(output[1]);
625                 control_fd = move_fd_to_high(control[1]);
626                 /* Valgrind spots the leak if we don't free these. */
627                 free_files(files);
628                 return true;
629         }
630
631         signal(SIGUSR1, hand_down);
632
633         close(control[1]);
634         close(output[1]);
635
636         /* We grab output so we can display it; we grab writes so we
637          * can compare. */
638         do {
639                 struct pollfd pfd[2];
640                 int ret;
641
642                 pfd[0].fd = output[0];
643                 pfd[0].events = POLLIN|POLLHUP;
644                 pfd[1].fd = control[0];
645                 pfd[1].events = POLLIN|POLLHUP;
646
647                 if (type == SUCCESS)
648                         ret = poll(pfd, 1, failtest_timeout_ms);
649                 else
650                         ret = poll(pfd, 2, failtest_timeout_ms);
651
652                 if (ret == 0)
653                         hand_down(SIGUSR1);
654                 if (ret < 0) {
655                         if (errno == EINTR)
656                                 continue;
657                         err(1, "Poll returned %i", ret);
658                 }
659
660                 if (pfd[0].revents & POLLIN) {
661                         ssize_t len;
662
663                         out = realloc(out, outlen + 8192);
664                         len = read(output[0], out + outlen, 8192);
665                         if (len > 0) outlen += len;
666                 } else if (type != SUCCESS && (pfd[1].revents & POLLIN)) {
667                         if (read_all(control[0], &type, sizeof(type))) {
668                                 if (type == WRITE) {
669                                         if (!read_write_info(control[0]))
670                                                 break;
671                                 } else if (type == RELEASE_LOCKS) {
672                                         release_locks();
673                                         /* FIXME: Tell them we're done... */
674                                 }
675                         }
676                 } else if (pfd[0].revents & POLLHUP) {
677                         break;
678                 }
679         } while (type != FAILURE);
680
681         close(output[0]);
682         close(control[0]);
683         waitpid(child, &status, 0);
684         if (!WIFEXITED(status)) {
685                 if (WTERMSIG(status) == SIGUSR1)
686                         child_fail(out, outlen, "Timed out");
687                 else
688                         child_fail(out, outlen, "Killed by signal %u: ",
689                                    WTERMSIG(status));
690         }
691         /* Child printed failure already, just pass up exit code. */
692         if (type == FAILURE) {
693                 fprintf(stderr, "%.*s", (int)outlen, out);
694                 tell_parent(type);
695                 exit(WEXITSTATUS(status) ? WEXITSTATUS(status) : 1);
696         }
697         if (WEXITSTATUS(status) != 0)
698                 child_fail(out, outlen, "Exited with status %i: ",
699                            WEXITSTATUS(status));
700
701         free(out);
702         signal(SIGUSR1, SIG_DFL);
703
704         restore_files(files);
705
706         /* Only child does probe. */
707         probing = false;
708
709         /* We continue onwards without failing. */
710         call->fail = false;
711         return false;
712 }
713
714 static void cleanup_calloc(struct calloc_call *call)
715 {
716         free(call->ret);
717 }
718
719 void *failtest_calloc(size_t nmemb, size_t size,
720                       const char *file, unsigned line)
721 {
722         struct failtest_call *p;
723         struct calloc_call call;
724         call.nmemb = nmemb;
725         call.size = size;
726         p = add_history(FAILTEST_CALLOC, file, line, &call);
727
728         if (should_fail(p)) {
729                 p->u.calloc.ret = NULL;
730                 p->error = ENOMEM;
731         } else {
732                 p->u.calloc.ret = calloc(nmemb, size);
733                 set_cleanup(p, cleanup_calloc, struct calloc_call);
734         }
735         errno = p->error;
736         return p->u.calloc.ret;
737 }
738
739 static void cleanup_malloc(struct malloc_call *call)
740 {
741         free(call->ret);
742 }
743
744 void *failtest_malloc(size_t size, const char *file, unsigned line)
745 {
746         struct failtest_call *p;
747         struct malloc_call call;
748         call.size = size;
749
750         p = add_history(FAILTEST_MALLOC, file, line, &call);
751         if (should_fail(p)) {
752                 p->u.malloc.ret = NULL;
753                 p->error = ENOMEM;
754         } else {
755                 p->u.malloc.ret = malloc(size);
756                 set_cleanup(p, cleanup_malloc, struct malloc_call);
757         }
758         errno = p->error;
759         return p->u.malloc.ret;
760 }
761
762 static void cleanup_realloc(struct realloc_call *call)
763 {
764         free(call->ret);
765 }
766
767 /* Walk back and find out if we got this ptr from a previous routine. */
768 static void fixup_ptr_history(void *ptr)
769 {
770         struct failtest_call *i;
771
772         /* Start at end of history, work back. */
773         tlist_for_each_rev(&history, i, list) {
774                 switch (i->type) {
775                 case FAILTEST_REALLOC:
776                         if (i->u.realloc.ret == ptr) {
777                                 i->cleanup = NULL;
778                                 return;
779                         }
780                         break;
781                 case FAILTEST_MALLOC:
782                         if (i->u.malloc.ret == ptr) {
783                                 i->cleanup = NULL;
784                                 return;
785                         }
786                         break;
787                 case FAILTEST_CALLOC:
788                         if (i->u.calloc.ret == ptr) {
789                                 i->cleanup = NULL;
790                                 return;
791                         }
792                         break;
793                 default:
794                         break;
795                 }
796         }
797 }
798
799 void *failtest_realloc(void *ptr, size_t size, const char *file, unsigned line)
800 {
801         struct failtest_call *p;
802         struct realloc_call call;
803         call.size = size;
804         p = add_history(FAILTEST_REALLOC, file, line, &call);
805
806         /* FIXME: Try one child moving allocation, one not. */
807         if (should_fail(p)) {
808                 p->u.realloc.ret = NULL;
809                 p->error = ENOMEM;
810         } else {
811                 /* Don't catch this one in the history fixup... */
812                 p->u.realloc.ret = NULL;
813                 fixup_ptr_history(ptr);
814                 p->u.realloc.ret = realloc(ptr, size);
815                 set_cleanup(p, cleanup_realloc, struct realloc_call);
816         }
817         errno = p->error;
818         return p->u.realloc.ret;
819 }
820
821 void failtest_free(void *ptr)
822 {
823         fixup_ptr_history(ptr);
824         free(ptr);
825 }
826
827 static void cleanup_open(struct open_call *call)
828 {
829         close(call->ret);
830 }
831
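/* open() is performed for real first; only a successful open is considered
 * for failure injection, in which case the real fd is closed again and the
 * caller sees -1/EACCES.  That way an injected failure never leaks an fd. */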
832 int failtest_open(const char *pathname,
833                   const char *file, unsigned line, ...)
834 {
835         struct failtest_call *p;
836         struct open_call call;
837         va_list ap;
838
839         call.pathname = strdup(pathname);
840         va_start(ap, line);
841         call.flags = va_arg(ap, int);
842         call.mode = 0;
843         if (call.flags & O_CREAT)
844                 call.mode = va_arg(ap, int);
845         va_end(ap);
846         p = add_history(FAILTEST_OPEN, file, line, &call);
847         /* Avoid memory leak! */
848         if (p == &unrecorded_call)
849                 free((char *)call.pathname);
850         p->u.open.ret = open(pathname, call.flags, call.mode);
851
852         if (p->u.open.ret == -1) {
853                 p->fail = false;
854                 p->error = errno;
855         } else if (should_fail(p)) {
856                 close(p->u.open.ret);
857                 p->u.open.ret = -1;
858                 /* FIXME: Play with error codes? */
859                 p->error = EACCES;
860         } else {
861                 set_cleanup(p, cleanup_open, struct open_call);
862         }
863         errno = p->error;
864         return p->u.open.ret;
865 }
866
867 void *failtest_mmap(void *addr, size_t length, int prot, int flags,
868                     int fd, off_t offset, const char *file, unsigned line)
869 {
870         struct failtest_call *p;
871         struct mmap_call call;
872
873         call.addr = addr;
874         call.length = length;
875         call.prot = prot;
876         call.flags = flags;
877         call.offset = offset;
878         call.fd = fd;
879
880         p = add_history(FAILTEST_MMAP, file, line, &call);
881         if (should_fail(p)) {
882                 p->u.mmap.ret = MAP_FAILED;
883                 p->error = ENOMEM;
884         } else {
885                 p->u.mmap.ret = mmap(addr, length, prot, flags, fd, offset);
886         }
887         errno = p->error;
888         return p->u.mmap.ret;
889 }
890
891 static void cleanup_pipe(struct pipe_call *call)
892 {
893         if (!call->closed[0])
894                 close(call->fds[0]);
895         if (!call->closed[1])
896                 close(call->fds[1]);
897 }
898
899 int failtest_pipe(int pipefd[2], const char *file, unsigned line)
900 {
901         struct failtest_call *p;
902         struct pipe_call call;
903
904         p = add_history(FAILTEST_PIPE, file, line, &call);
905         if (should_fail(p)) {
906                 p->u.pipe.ret = -1;
907                 /* FIXME: Play with error codes? */
908                 p->error = EMFILE;
909         } else {
910                 p->u.pipe.ret = pipe(p->u.pipe.fds);
911                 p->u.pipe.closed[0] = p->u.pipe.closed[1] = false;
912                 set_cleanup(p, cleanup_pipe, struct pipe_call);
913         }
914         /* This causes valgrind to notice if they use pipefd[] after failure */
915         memcpy(pipefd, p->u.pipe.fds, sizeof(p->u.pipe.fds));
916         errno = p->error;
917         return p->u.pipe.ret;
918 }
919
920 ssize_t failtest_pread(int fd, void *buf, size_t count, off_t off,
921                        const char *file, unsigned line)
922 {
923         struct failtest_call *p;
924         struct read_call call;
925         call.fd = fd;
926         call.buf = buf;
927         call.count = count;
928         call.off = off;
929         p = add_history(FAILTEST_READ, file, line, &call);
930
931         /* FIXME: Try partial read returns. */
932         if (should_fail(p)) {
933                 p->u.read.ret = -1;
934                 p->error = EIO;
935         } else {
936                 p->u.read.ret = pread(fd, buf, count, off);
937         }
938         errno = p->error;
939         return p->u.read.ret;
940 }
941
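/* Writes need special care: for a non-seekable fd (off == -1, e.g. a pipe or
 * socket) the failing child's write has already reached the other end, so the
 * child reports each such write to the parent over the control pipe.  When
 * the parent later reaches the same call, it checks it would have written
 * exactly the same bytes and skips re-writing them, rather than duplicating
 * data on the stream. */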
942 ssize_t failtest_pwrite(int fd, const void *buf, size_t count, off_t off,
943                         const char *file, unsigned line)
944 {
945         struct failtest_call *p;
946         struct write_call call;
947
948         call.fd = fd;
949         call.buf = buf;
950         call.count = count;
951         call.off = off;
952         p = add_history(FAILTEST_WRITE, file, line, &call);
953
954         /* If we're a child, we need to make sure we write the same thing
955          * to non-files as the parent does, so tell it. */
956         if (control_fd != -1 && off == (off_t)-1) {
957                 enum info_type type = WRITE;
958
959                 write_all(control_fd, &type, sizeof(type));
960                 write_all(control_fd, &p->u.write, sizeof(p->u.write));
961                 write_all(control_fd, buf, count);
962         }
963
964         /* FIXME: Try partial write returns. */
965         if (should_fail(p)) {
966                 p->u.write.ret = -1;
967                 p->error = EIO;
968         } else {
969                 /* FIXME: We assume same write order in parent and child */
970                 if (off == (off_t)-1 && child_writes_num != 0) {
971                         if (child_writes[0].fd != fd)
972                                 errx(1, "Child wrote to fd %u, not %u?",
973                                      child_writes[0].fd, fd);
974                         if (child_writes[0].off != p->u.write.off)
975                                 errx(1, "Child wrote to offset %zu, not %zu?",
976                                      (size_t)child_writes[0].off,
977                                      (size_t)p->u.write.off);
978                         if (child_writes[0].count != count)
979                                 errx(1, "Child wrote length %zu, not %zu?",
980                                      child_writes[0].count, count);
981                         if (memcmp(child_writes[0].buf, buf, count)) {
982                                 child_fail(NULL, 0,
983                                            "Child wrote differently to"
984                                            " fd %u than we did!\n", fd);
985                         }
986                         free((char *)child_writes[0].buf);
987                         child_writes_num--;
988                         memmove(&child_writes[0], &child_writes[1],
989                                 sizeof(child_writes[0]) * child_writes_num);
990
991                         /* If this is a socket or pipe, the child wrote
992                            it already. */
993                         if (p->u.write.off == (off_t)-1) {
994                                 p->u.write.ret = count;
995                                 errno = p->error;
996                                 return p->u.write.ret;
997                         }
998                 }
999                 p->u.write.ret = pwrite(fd, buf, count, off);
1000         }
1001         errno = p->error;
1002         return p->u.write.ret;
1003 }
1004
1005 ssize_t failtest_read(int fd, void *buf, size_t count,
1006                       const char *file, unsigned line)
1007 {
1008         return failtest_pread(fd, buf, count, lseek(fd, 0, SEEK_CUR),
1009                               file, line);
1010 }
1011
1012 ssize_t failtest_write(int fd, const void *buf, size_t count,
1013                        const char *file, unsigned line)
1014 {
1015         return failtest_pwrite(fd, buf, count, lseek(fd, 0, SEEK_CUR),
1016                                file, line);
1017 }
1018
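/* Track byte-range locks in the locks[] array so that get_locks() can
 * re-establish them in whichever process currently needs to own them.
 * add_lock() first trims or removes any existing ranges on the same fd that
 * overlap [start, end], then appends the new range unless it is an unlock. */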
1019 static struct lock_info *WARN_UNUSED_RESULT
1020 add_lock(struct lock_info *locks, int fd, off_t start, off_t end, int type)
1021 {
1022         unsigned int i;
1023         struct lock_info *l;
1024
1025         for (i = 0; i < lock_num; i++) {
1026                 l = &locks[i];
1027
1028                 if (l->fd != fd)
1029                         continue;
1030                 /* Four cases we care about:
1031                  * Start overlap:
1032                  *      l =    |      |
1033                  *      new = |   |
1034                  * Mid overlap:
1035                  *      l =    |      |
1036                  *      new =    |  |
1037                  * End overlap:
1038                  *      l =    |      |
1039                  *      new =      |    |
1040                  * Total overlap:
1041                  *      l =    |      |
1042                  *      new = |         |
1043                  */
1044                 if (start > l->start && end < l->end) {
1045                         /* Mid overlap: trim entry, add new one. */
1046                         off_t new_start, new_end;
1047                         new_start = end + 1;
1048                         new_end = l->end;
1049                         l->end = start - 1;
1050                         locks = add_lock(locks,
1051                                          fd, new_start, new_end, l->type);
1052                         l = &locks[i];
1053                 } else if (start <= l->start && end >= l->end) {
1054                         /* Total overlap: eliminate entry. */
1055                         l->end = 0;
1056                         l->start = 1;
1057                 } else if (end >= l->start && end < l->end) {
1058                         /* Start overlap: trim entry. */
1059                         l->start = end + 1;
1060                 } else if (start > l->start && start <= l->end) {
1061                         /* End overlap: trim entry. */
1062                         l->end = start-1;
1063                 }
1064                 /* Nothing left?  Remove it. */
1065                 if (l->end < l->start) {
1066                         memmove(l, l + 1, (--lock_num - i) * sizeof(l[0]));
1067                         i--;
1068                 }
1069         }
1070
1071         if (type != F_UNLCK) {
1072                 locks = realloc(locks, (lock_num + 1) * sizeof(*locks));
1073                 l = &locks[lock_num++];
1074                 l->fd = fd;
1075                 l->start = start;
1076                 l->end = end;
1077                 l->type = type;
1078         }
1079         return locks;
1080 }
1081
1082 /* We trap this so we can record it: we don't fail it. */
1083 int failtest_close(int fd, const char *file, unsigned line)
1084 {
1085         struct failtest_call *i;
1086         struct close_call call;
1087         struct failtest_call *p;
1088
1089         call.fd = fd;
1090         p = add_history(FAILTEST_CLOSE, file, line, &call);
1091         p->fail = false;
1092
1093         /* Consume close from failpath. */
1094         if (failpath)
1095                 if (should_fail(p))
1096                         abort();
1097
1098         if (fd < 0)
1099                 return close(fd);
1100
1101         /* Trace history to find source of fd. */
1102         tlist_for_each_rev(&history, i, list) {
1103                 switch (i->type) {
1104                 case FAILTEST_PIPE:
1105                         /* From a pipe? */
1106                         if (i->u.pipe.fds[0] == fd) {
1107                                 assert(!i->u.pipe.closed[0]);
1108                                 i->u.pipe.closed[0] = true;
1109                                 if (i->u.pipe.closed[1])
1110                                         i->cleanup = NULL;
1111                                 goto out;
1112                         }
1113                         if (i->u.pipe.fds[1] == fd) {
1114                                 assert(!i->u.pipe.closed[1]);
1115                                 i->u.pipe.closed[1] = true;
1116                                 if (i->u.pipe.closed[0])
1117                                         i->cleanup = NULL;
1118                                 goto out;
1119                         }
1120                         break;
1121                 case FAILTEST_OPEN:
1122                         if (i->u.open.ret == fd) {
1123                                 assert((void *)i->cleanup
1124                                        == (void *)cleanup_open);
1125                                 i->cleanup = NULL;
1126                                 goto out;
1127                         }
1128                         break;
1129                 default:
1130                         break;
1131                 }
1132         }
1133
1134 out:
1135         locks = add_lock(locks, fd, 0, off_max(), F_UNLCK);
1136         return close(fd);
1137 }
1138
1139 /* Zero length means "to end of file" */
1140 static off_t end_of(off_t start, off_t len)
1141 {
1142         if (len == 0)
1143                 return off_max();
1144         return start + len - 1;
1145 }
1146
1147 /* FIXME: This only handles locks, really. */
1148 int failtest_fcntl(int fd, const char *file, unsigned line, int cmd, ...)
1149 {
1150         struct failtest_call *p;
1151         struct fcntl_call call;
1152         va_list ap;
1153
1154         call.fd = fd;
1155         call.cmd = cmd;
1156
1157         /* Argument extraction. */
1158         switch (cmd) {
1159         case F_SETFL:
1160         case F_SETFD:
1161                 va_start(ap, cmd);
1162                 call.arg.l = va_arg(ap, long);
1163                 va_end(ap);
1164                 return fcntl(fd, cmd, call.arg.l);
1165         case F_GETFD:
1166         case F_GETFL:
1167                 return fcntl(fd, cmd);
1168         case F_GETLK:
1169                 get_locks();
1170                 va_start(ap, cmd);
1171                 call.arg.fl = *va_arg(ap, struct flock *);
1172                 va_end(ap);
1173                 return fcntl(fd, cmd, &call.arg.fl);
1174         case F_SETLK:
1175         case F_SETLKW:
1176                 va_start(ap, cmd);
1177                 call.arg.fl = *va_arg(ap, struct flock *);
1178                 va_end(ap);
1179                 break;
1180         default:
1181                 /* This means you need to implement it here. */
1182                 errx(1, "failtest: unknown fcntl %u", cmd);
1183         }
1184
1185         p = add_history(FAILTEST_FCNTL, file, line, &call);
1186
1187         if (should_fail(p)) {
1188                 p->u.fcntl.ret = -1;
1189                 if (p->u.fcntl.cmd == F_SETLK)
1190                         p->error = EAGAIN;
1191                 else
1192                         p->error = EDEADLK;
1193         } else {
1194                 get_locks();
1195                 p->u.fcntl.ret = fcntl(p->u.fcntl.fd, p->u.fcntl.cmd,
1196                                        &p->u.fcntl.arg.fl);
1197                 if (p->u.fcntl.ret == -1)
1198                         p->error = errno;
1199                 else {
1200                         /* We don't handle anything else yet. */
1201                         assert(p->u.fcntl.arg.fl.l_whence == SEEK_SET);
1202                         locks = add_lock(locks,
1203                                          p->u.fcntl.fd,
1204                                          p->u.fcntl.arg.fl.l_start,
1205                                          end_of(p->u.fcntl.arg.fl.l_start,
1206                                                 p->u.fcntl.arg.fl.l_len),
1207                                          p->u.fcntl.arg.fl.l_type);
1208                 }
1209         }
1210         errno = p->error;
1211         return p->u.fcntl.ret;
1212 }
1213
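/* getpid() is wrapped so the program under test keeps seeing its original
 * pid even when it is actually running inside a forked failure child. */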
1214 pid_t failtest_getpid(const char *file, unsigned line)
1215 {
1216         /* You must call failtest_init first! */
1217         assert(orig_pid);
1218         return orig_pid;
1219 }
1220         
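/* Parse failtest's own command-line options (--failpath=, --tracepath,
 * --debugpath=) and record the starting pid and time.
 *
 * A minimal usage sketch (an illustration, not taken from this file; it
 * assumes the companion header ccan/failtest/failtest_override.h is used to
 * redirect malloc/open/etc. to the failtest_*() wrappers defined here):
 *
 *     #include <ccan/failtest/failtest_override.h>
 *     #include <ccan/failtest/failtest.h>
 *
 *     int main(int argc, char *argv[])
 *     {
 *             failtest_init(argc, argv);
 *             ...exercise the code under test...
 *             failtest_exit(0);
 *     }
 */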
1221 void failtest_init(int argc, char *argv[])
1222 {
1223         unsigned int i;
1224
1225         orig_pid = getpid();
1226
1227         warnfd = move_fd_to_high(dup(STDERR_FILENO));
1228         for (i = 1; i < argc; i++) {
1229                 if (!strncmp(argv[i], "--failpath=", strlen("--failpath="))) {
1230                         failpath = argv[i] + strlen("--failpath=");
1231                 } else if (strcmp(argv[i], "--tracepath") == 0) {
1232                         tracefd = warnfd;
1233                         failtest_timeout_ms = -1;
1234                 } else if (!strncmp(argv[i], "--debugpath=",
1235                                     strlen("--debugpath="))) {
1236                         debugpath = argv[i] + strlen("--debugpath=");
1237                 }
1238         }
1239         failtable_init(&failtable);
1240         start = time_now();
1241 }
1242
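/* True when running inside a forked failure child (control_fd is only set up
 * on the child side of should_fail()), i.e. when an error has been injected
 * somewhere in this run. */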
1243 bool failtest_has_failed(void)
1244 {
1245         return control_fd != -1;
1246 }
1247
1248 void failtest_exit(int status)
1249 {
1250         if (failtest_exit_check) {
1251                 if (!failtest_exit_check(&history))
1252                         child_fail(NULL, 0, "failtest_exit_check failed\n");
1253         }
1254
1255         failtest_cleanup(false, status);
1256 }