ccan/failtest/failtest.c
failtest: don't use special data structure for storing child writes.
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <ctype.h>
#include <err.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>
#include <errno.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <assert.h>
#include <ccan/read_write_all/read_write_all.h>
#include <ccan/failtest/failtest_proto.h>
#include <ccan/failtest/failtest.h>
#include <ccan/build_assert/build_assert.h>

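/*
 * Overview: each failtest_*() wrapper below records the call in a history
 * array, then asks should_fail() whether to inject a failure.  should_fail()
 * forks: the child pretends the call failed (ENOMEM, EIO, ...), while the
 * parent captures the child's output and waits to see whether the program
 * under test copes.  Crashes or unexpected exit statuses are reported along
 * with a --failpath string that replays the same failure sequence.
 *
 * A minimal usage sketch (an assumption for illustration: the override
 * macros that redirect malloc/open/etc. here with __FILE__/__LINE__ are
 * taken to live in ccan/failtest/failtest_override.h):
 *
 *	#include <ccan/failtest/failtest_override.h>
 *	#include <ccan/failtest/failtest.h>
 *
 *	int main(int argc, char *argv[])
 *	{
 *		failtest_init(argc, argv);
 *		run_tests();	// exercise the code under test
 *		failtest_exit(0);
 *	}
 */
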
bool (*failtest_hook)(struct failtest_call *history, unsigned num)
	= failtest_default_hook;

static int tracefd = -1;

unsigned int failtest_timeout_ms = 20000;

const char *failpath;

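/*
 * Message types sent over the control pipe between a forked child and its
 * parent.  WRITE announces a write the child performed (so the parent can
 * compare it against its own later write), RELEASE_LOCKS asks the lock
 * owner to drop its fcntl locks, and FAILURE/SUCCESS report the child's
 * outcome.  UNEXPECTED is the parent's initial "no news yet" state.
 */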
enum info_type {
	WRITE,
	RELEASE_LOCKS,
	FAILURE,
	SUCCESS,
	UNEXPECTED
};

struct lock_info {
	int fd;
	/* end is inclusive: you can't have a 0-byte lock. */
	off_t start, end;
	int type;
};

bool (*failtest_exit_check)(struct failtest_call *history, unsigned num);

static struct failtest_call *history = NULL;
static unsigned int history_num = 0;
static int control_fd = -1;
static struct timeval start;

static struct write_call *child_writes = NULL;
static unsigned int child_writes_num = 0;

static pid_t lock_owner;
static struct lock_info *locks = NULL;
static unsigned int lock_num = 0;

static const char info_to_arg[] = "mceoprwf";

/* Dummy call used for failtest_undo wrappers. */
static struct failtest_call unrecorded_call;

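/*
 * Append one entry to the global history array, copying the call-specific
 * argument block into the union.  Returns the new entry, or the dummy
 * unrecorded_call when file is NULL (the failtest_undo wrappers pass a NULL
 * file to mean "don't inject a failure here").
 */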
static struct failtest_call *add_history_(enum failtest_call_type type,
					  const char *file,
					  unsigned int line,
					  const void *elem,
					  size_t elem_size)
{
	/* NULL file is how we suppress failure. */
	if (!file)
		return &unrecorded_call;

	history = realloc(history, (history_num + 1) * sizeof(*history));
	history[history_num].type = type;
	history[history_num].file = file;
	history[history_num].line = line;
	history[history_num].cleanup = NULL;
	/* Start with no error, so a successful call doesn't hand back an
	 * uninitialized errno. */
	history[history_num].error = 0;
	memcpy(&history[history_num].u, elem, elem_size);
	return &history[history_num++];
}

#define add_history(type, file, line, elem) \
	add_history_((type), (file), (line), (elem), sizeof(*(elem)))

#define set_cleanup(call, clean, type)			\
	(call)->cleanup = (void *)((void)sizeof(clean((type *)NULL)), (clean))

bool failtest_default_hook(struct failtest_call *history, unsigned num)
{
	return true;
}

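/*
 * Receive one WRITE record from a child over the control pipe: the fixed
 * struct write_call followed by the written bytes themselves.  The parent
 * stores it in child_writes[] so failtest_pwrite() can check that it later
 * performs the same write the child already did.
 */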
static bool read_write_info(int fd)
{
	struct write_call *w;
	char *buf;

	/* We don't need all of this, but it's simple. */
	child_writes = realloc(child_writes,
			       (child_writes_num+1) * sizeof(child_writes[0]));
	w = &child_writes[child_writes_num];
	if (!read_all(fd, w, sizeof(*w)))
		return false;

	w->buf = buf = malloc(w->count);
	if (!read_all(fd, buf, w->count))
		return false;

	child_writes_num++;
	return true;
}

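/*
 * Render the history as a --failpath string: one character per call, taken
 * from info_to_arg[] (e.g. 'm' for malloc, 'o' for open), uppercased if
 * that call was failed.  child_fail() prints this so a crash can be
 * replayed deterministically.
 */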
static char *failpath_string(void)
{
	unsigned int i;
	char *ret = malloc(history_num + 1);

	for (i = 0; i < history_num; i++) {
		ret[i] = info_to_arg[history[i].type];
		if (history[i].fail)
			ret[i] = toupper(ret[i]);
	}
	ret[i] = '\0';
	return ret;
}

static void tell_parent(enum info_type type)
{
	if (control_fd != -1)
		write_all(control_fd, &type, sizeof(type));
}

static void child_fail(const char *out, size_t outlen, const char *fmt, ...)
{
	va_list ap;
	char *path = failpath_string();

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);

	fprintf(stderr, "%.*s", (int)outlen, out);
	printf("To reproduce: --failpath=%s\n", path);
	free(path);
	tell_parent(FAILURE);
	exit(1);
}

static pid_t child;

static void hand_down(int signal)
{
	kill(child, signal);
}

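/*
 * fcntl locks are per-process, so whichever process (parent or child)
 * last issued F_SETLK "owns" them.  lock_owner tracks that pid; a child
 * that needs the locks asks its parent to release them via RELEASE_LOCKS,
 * then re-acquires them itself with get_locks().
 */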
static void release_locks(void)
{
	/* Locks were never acquired/reacquired? */
	if (lock_owner == 0)
		return;

	/* We own them?  Release them all. */
	if (lock_owner == getpid()) {
		unsigned int i;
		struct flock fl;
		fl.l_type = F_UNLCK;
		fl.l_whence = SEEK_SET;
		fl.l_start = 0;
		fl.l_len = 0;

		for (i = 0; i < lock_num; i++)
			fcntl(locks[i].fd, F_SETLK, &fl);
	} else {
		/* Our parent must have them; pass request up. */
		enum info_type type = RELEASE_LOCKS;
		assert(control_fd != -1);
		write_all(control_fd, &type, sizeof(type));
	}
	lock_owner = 0;
}

/* off_t is a signed type.  Getting its max is non-trivial. */
static off_t off_max(void)
{
	BUILD_ASSERT(sizeof(off_t) == 4 || sizeof(off_t) == 8);
	if (sizeof(off_t) == 4)
		return (off_t)0x7FFFFFFF;
	else
		return (off_t)0x7FFFFFFFFFFFFFFFULL;
}

static void get_locks(void)
{
	unsigned int i;
	struct flock fl;

	if (lock_owner == getpid())
		return;

	if (lock_owner != 0) {
		enum info_type type = RELEASE_LOCKS;
		assert(control_fd != -1);
		write_all(control_fd, &type, sizeof(type));
	}

	fl.l_whence = SEEK_SET;

	for (i = 0; i < lock_num; i++) {
		fl.l_type = locks[i].type;
		fl.l_start = locks[i].start;
		if (locks[i].end == off_max())
			fl.l_len = 0;
		else
			fl.l_len = locks[i].end - locks[i].start + 1;

		if (fcntl(locks[i].fd, F_SETLKW, &fl) != 0)
			abort();
	}
	lock_owner = getpid();
}

static void trace_str(const char *str)
{
	ssize_t ret;

	/* Keep writing while progress is made; fall out to err() on error. */
	while ((ret = write(tracefd, str, strlen(str))) > 0) {
		str += ret;
		if (!*str)
			return;
	}
	err(1, "Writing trace.");
}

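/*
 * The heart of failtest.  Decide whether this call should fail: if a
 * --failpath is driving us, just follow it.  Otherwise fork: the child
 * returns true (so the wrapper injects the failure) while the parent
 * captures the child's stdout/stderr and control-pipe traffic, waits for
 * it to exit, and reports any crash or unexpected exit status with
 * child_fail().  The parent then returns false and carries on with the
 * real call.
 */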
static bool should_fail(struct failtest_call *call)
{
	int status;
	int control[2], output[2];
	enum info_type type = UNEXPECTED;
	char *out = NULL;
	size_t outlen = 0;

	if (call == &unrecorded_call)
		return false;

	if (failpath) {
		/* + means continue after end, like normal. */
		if (*failpath == '+')
			failpath = NULL;
		else {
			if (tolower(*failpath) != info_to_arg[call->type])
				errx(1, "Failpath expected '%c' got '%c'",
				     info_to_arg[call->type], *failpath);
			call->fail = isupper(*(failpath++));
			return call->fail;
		}
	}

	if (!failtest_hook(history, history_num)) {
		call->fail = false;
		return false;
	}

	/* We're going to fail in the child. */
	call->fail = true;
	if (pipe(control) != 0 || pipe(output) != 0)
		err(1, "opening pipe");

	/* Prevent double-printing (in child and parent) */
	fflush(stdout);
	child = fork();
	if (child == -1)
		err(1, "forking failed");

	if (child == 0) {
		if (tracefd != -1) {
			struct timeval now;
			char str[50], *p;
			gettimeofday(&now, NULL);
			if (now.tv_usec < start.tv_usec) {
				now.tv_sec--;
				now.tv_usec += 1000000;
			}
			now.tv_usec -= start.tv_usec;
			now.tv_sec -= start.tv_sec;
			sprintf(str, "%u (%u.%02u): ", getpid(),
				(int)now.tv_sec, (int)now.tv_usec / 10000);
			trace_str(str);
			p = failpath_string();
			trace_str(p);
			free(p);
			trace_str("(");
			p = strchr(history[history_num-1].file, '/');
			if (p)
				trace_str(p+1);
			else
				trace_str(history[history_num-1].file);
			sprintf(str, ":%u)\n", history[history_num-1].line);
			trace_str(str);
		}
		close(control[0]);
		close(output[0]);
		dup2(output[1], STDOUT_FILENO);
		dup2(output[1], STDERR_FILENO);
		if (output[1] != STDOUT_FILENO && output[1] != STDERR_FILENO)
			close(output[1]);
		control_fd = control[1];
		return true;
	}

	signal(SIGUSR1, hand_down);

	close(control[1]);
	close(output[1]);

	/* We grab output so we can display it; we grab writes so we
	 * can compare. */
	do {
		struct pollfd pfd[2];
		int ret;

		pfd[0].fd = output[0];
		pfd[0].events = POLLIN|POLLHUP;
		pfd[1].fd = control[0];
		pfd[1].events = POLLIN|POLLHUP;

		if (type == SUCCESS)
			ret = poll(pfd, 1, failtest_timeout_ms);
		else
			ret = poll(pfd, 2, failtest_timeout_ms);

		if (ret <= 0)
			hand_down(SIGUSR1);

		if (pfd[0].revents & POLLIN) {
			ssize_t len;

			out = realloc(out, outlen + 8192);
			len = read(output[0], out + outlen, 8192);
			if (len > 0)
				outlen += len;
		} else if (type != SUCCESS && (pfd[1].revents & POLLIN)) {
			if (read_all(control[0], &type, sizeof(type))) {
				if (type == WRITE) {
					if (!read_write_info(control[0]))
						break;
				} else if (type == RELEASE_LOCKS) {
					release_locks();
					/* FIXME: Tell them we're done... */
				}
			}
		} else if (pfd[0].revents & POLLHUP) {
			break;
		}
	} while (type != FAILURE);

	close(output[0]);
	close(control[0]);
	waitpid(child, &status, 0);
	if (!WIFEXITED(status))
		child_fail(out, outlen, "Killed by signal %u: ",
			   WTERMSIG(status));
	/* Child printed failure already, just pass up exit code. */
	if (type == FAILURE) {
		fprintf(stderr, "%.*s", (int)outlen, out);
		tell_parent(type);
		exit(WEXITSTATUS(status) ? WEXITSTATUS(status) : 1);
	}
	if (WEXITSTATUS(status) != 0)
		child_fail(out, outlen, "Exited with status %i: ",
			   WEXITSTATUS(status));

	free(out);
	signal(SIGUSR1, SIG_DFL);

	/* We continue onwards without failing. */
	call->fail = false;
	return false;
}

static void cleanup_calloc(struct calloc_call *call)
{
	free(call->ret);
}

void *failtest_calloc(size_t nmemb, size_t size,
		      const char *file, unsigned line)
{
	struct failtest_call *p;
	struct calloc_call call;
	call.nmemb = nmemb;
	call.size = size;
	p = add_history(FAILTEST_CALLOC, file, line, &call);

	if (should_fail(p)) {
		p->u.calloc.ret = NULL;
		p->error = ENOMEM;
	} else {
		p->u.calloc.ret = calloc(nmemb, size);
		set_cleanup(p, cleanup_calloc, struct calloc_call);
	}
	errno = p->error;
	return p->u.calloc.ret;
}

static void cleanup_malloc(struct malloc_call *call)
{
	free(call->ret);
}

void *failtest_malloc(size_t size, const char *file, unsigned line)
{
	struct failtest_call *p;
	struct malloc_call call;
	call.size = size;

	p = add_history(FAILTEST_MALLOC, file, line, &call);
	if (should_fail(p)) {
		p->u.malloc.ret = NULL;
		p->error = ENOMEM;
	} else {
		p->u.malloc.ret = malloc(size);
		set_cleanup(p, cleanup_malloc, struct malloc_call);
	}
	errno = p->error;
	return p->u.malloc.ret;
}

static void cleanup_realloc(struct realloc_call *call)
{
	free(call->ret);
}

/* Walk back and find out if we got this ptr from a previous routine. */
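/* Clearing that entry's cleanup handler stops failtest_exit() from freeing
 * memory the test program has since realloc'd or freed itself. */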
static void fixup_ptr_history(void *ptr, unsigned int last)
{
	int i;

	/* Start at end of history, work back. */
	for (i = last - 1; i >= 0; i--) {
		switch (history[i].type) {
		case FAILTEST_REALLOC:
			if (history[i].u.realloc.ret == ptr) {
				history[i].cleanup = NULL;
				return;
			}
			break;
		case FAILTEST_MALLOC:
			if (history[i].u.malloc.ret == ptr) {
				history[i].cleanup = NULL;
				return;
			}
			break;
		case FAILTEST_CALLOC:
			if (history[i].u.calloc.ret == ptr) {
				history[i].cleanup = NULL;
				return;
			}
			break;
		default:
			break;
		}
	}
}

void *failtest_realloc(void *ptr, size_t size, const char *file, unsigned line)
{
	struct failtest_call *p;
	struct realloc_call call;
	call.size = size;
	p = add_history(FAILTEST_REALLOC, file, line, &call);

	/* FIXME: Try one child moving allocation, one not. */
	if (should_fail(p)) {
		p->u.realloc.ret = NULL;
		p->error = ENOMEM;
	} else {
		fixup_ptr_history(ptr, history_num-1);
		p->u.realloc.ret = realloc(ptr, size);
		set_cleanup(p, cleanup_realloc, struct realloc_call);
	}
	errno = p->error;
	return p->u.realloc.ret;
}

void failtest_free(void *ptr)
{
	fixup_ptr_history(ptr, history_num);
	free(ptr);
}

static void cleanup_open(struct open_call *call)
{
	close(call->ret);
}

int failtest_open(const char *pathname,
		  const char *file, unsigned line, ...)
{
	struct failtest_call *p;
	struct open_call call;
	va_list ap;

	call.pathname = strdup(pathname);
	/* Not every open passes a mode; don't leave it uninitialized. */
	call.mode = 0;
	va_start(ap, line);
	call.flags = va_arg(ap, int);
	if (call.flags & O_CREAT)
		call.mode = va_arg(ap, mode_t);
	va_end(ap);
	p = add_history(FAILTEST_OPEN, file, line, &call);
	/* Avoid memory leak! */
	if (p == &unrecorded_call)
		free((char *)call.pathname);
	if (should_fail(p)) {
		p->u.open.ret = -1;
		/* FIXME: Play with error codes? */
		p->error = EACCES;
	} else {
		p->u.open.ret = open(pathname, call.flags, call.mode);
		set_cleanup(p, cleanup_open, struct open_call);
		p->u.open.dup_fd = p->u.open.ret;
	}
	errno = p->error;
	return p->u.open.ret;
}

static void cleanup_pipe(struct pipe_call *call)
{
	if (!call->closed[0])
		close(call->fds[0]);
	if (!call->closed[1])
		close(call->fds[1]);
}

int failtest_pipe(int pipefd[2], const char *file, unsigned line)
{
	struct failtest_call *p;
	struct pipe_call call;

	p = add_history(FAILTEST_PIPE, file, line, &call);
	if (should_fail(p)) {
		p->u.pipe.ret = -1;
		/* FIXME: Play with error codes? */
		p->error = EMFILE;
	} else {
		p->u.pipe.ret = pipe(p->u.pipe.fds);
		p->u.pipe.closed[0] = p->u.pipe.closed[1] = false;
		set_cleanup(p, cleanup_pipe, struct pipe_call);
	}
	/* This causes valgrind to notice if they use pipefd[] after failure */
	memcpy(pipefd, p->u.pipe.fds, sizeof(p->u.pipe.fds));
	errno = p->error;
	return p->u.pipe.ret;
}

static void cleanup_read(struct read_call *call)
{
	lseek(call->fd, call->off, SEEK_SET);
}

ssize_t failtest_pread(int fd, void *buf, size_t count, off_t off,
		       const char *file, unsigned line)
{
	struct failtest_call *p;
	struct read_call call;
	call.fd = fd;
	call.buf = buf;
	call.count = count;
	call.off = off;
	p = add_history(FAILTEST_READ, file, line, &call);

	/* FIXME: Try partial read returns. */
	if (should_fail(p)) {
		p->u.read.ret = -1;
		p->error = EIO;
	} else {
		p->u.read.ret = pread(fd, buf, count, off);
		set_cleanup(p, cleanup_read, struct read_call);
	}
	errno = p->error;
	return p->u.read.ret;
}

static void cleanup_write(struct write_call *call)
{
	lseek(call->dup_fd, call->off, SEEK_SET);
	write(call->dup_fd, call->saved_contents, call->saved_len);
	lseek(call->dup_fd, call->off, SEEK_SET);
	ftruncate(call->dup_fd, call->old_filelen);
	free(call->saved_contents);
}
ssize_t failtest_pwrite(int fd, const void *buf, size_t count, off_t off,
			const char *file, unsigned line)
{
	struct failtest_call *p;
	struct write_call call;

	call.fd = call.dup_fd = fd;
	call.buf = buf;
	call.count = count;
	call.off = off;
	p = add_history(FAILTEST_WRITE, file, line, &call);

	/* Save old contents if we can */
	if (p->u.write.off != -1) {
		ssize_t ret;
		p->u.write.old_filelen = lseek(fd, 0, SEEK_END);

		/* Write past end of file?  Nothing to save. */
		if (p->u.write.old_filelen <= p->u.write.off)
			p->u.write.saved_len = 0;
		/* Write which goes over end of file?  Partial save. */
		else if (p->u.write.off + count > p->u.write.old_filelen)
			p->u.write.saved_len = p->u.write.old_filelen
				- p->u.write.off;
		/* Full save. */
		else
			p->u.write.saved_len = count;

		p->u.write.saved_contents = malloc(p->u.write.saved_len);
		lseek(fd, p->u.write.off, SEEK_SET);
		ret = read(fd, p->u.write.saved_contents, p->u.write.saved_len);
		if (ret != p->u.write.saved_len)
			err(1, "Expected %i bytes, got %i",
			    (int)p->u.write.saved_len, (int)ret);
		lseek(fd, p->u.write.off, SEEK_SET);
		set_cleanup(p, cleanup_write, struct write_call);
	}

	/* If we're a child, tell parent about write. */
	if (control_fd != -1) {
		enum info_type type = WRITE;

		write_all(control_fd, &type, sizeof(type));
		write_all(control_fd, &p->u.write, sizeof(p->u.write));
		write_all(control_fd, buf, count);
	}

	/* FIXME: Try partial write returns. */
	if (should_fail(p)) {
		p->u.write.ret = -1;
		p->error = EIO;
	} else {
		/* FIXME: We assume same write order in parent and child */
		if (child_writes_num != 0) {
			if (child_writes[0].fd != fd)
				errx(1, "Child wrote to fd %u, not %u?",
				     child_writes[0].fd, fd);
			if (child_writes[0].off != p->u.write.off)
				errx(1, "Child wrote to offset %zu, not %zu?",
				     (size_t)child_writes[0].off,
				     (size_t)p->u.write.off);
			if (child_writes[0].count != count)
				errx(1, "Child wrote length %zu, not %zu?",
				     child_writes[0].count, count);
			if (memcmp(child_writes[0].buf, buf, count)) {
				child_fail(NULL, 0,
					   "Child wrote differently to"
					   " fd %u than we did!\n", fd);
			}
			free((char *)child_writes[0].buf);
			child_writes_num--;
			memmove(&child_writes[0], &child_writes[1],
				sizeof(child_writes[0]) * child_writes_num);

			/* If this is a socket or pipe, the child already
			 * wrote it for us. */
			if (p->u.write.off == (off_t)-1) {
				p->u.write.ret = count;
				errno = p->error;
				return p->u.write.ret;
			}
		}
		p->u.write.ret = pwrite(fd, buf, count, off);
	}
	errno = p->error;
	return p->u.write.ret;
}

ssize_t failtest_read(int fd, void *buf, size_t count,
		      const char *file, unsigned line)
{
	return failtest_pread(fd, buf, count, lseek(fd, 0, SEEK_CUR),
			      file, line);
}

ssize_t failtest_write(int fd, const void *buf, size_t count,
		       const char *file, unsigned line)
{
	return failtest_pwrite(fd, buf, count, lseek(fd, 0, SEEK_CUR),
			       file, line);
}

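/*
 * Maintain the table of byte-range locks we believe we hold.  Overlapping
 * ranges on the same fd are trimmed or split so entries never overlap;
 * with F_UNLCK the range is simply carved out.  For example (a sketch,
 * offsets are arbitrary): holding [10,20] and adding an unlock of [12,15]
 * leaves two entries, [10,11] and [16,20].
 */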
static struct lock_info *WARN_UNUSED_RESULT
add_lock(struct lock_info *locks, int fd, off_t start, off_t end, int type)
{
	unsigned int i;
	struct lock_info *l;

	for (i = 0; i < lock_num; i++) {
		l = &locks[i];

		if (l->fd != fd)
			continue;
		/* Four cases we care about:
		 * Start overlap:
		 *	l =    |      |
		 *	new = |   |
		 * Mid overlap:
		 *	l =    |      |
		 *	new =    |  |
		 * End overlap:
		 *	l =    |      |
		 *	new =      |    |
		 * Total overlap:
		 *	l =    |      |
		 *	new = |         |
		 */
		if (start > l->start && end < l->end) {
			/* Mid overlap: trim entry, add new one. */
			off_t new_start, new_end;
			new_start = end + 1;
			new_end = l->end;
			l->end = start - 1;
			locks = add_lock(locks,
					 fd, new_start, new_end, l->type);
			l = &locks[i];
		} else if (start <= l->start && end >= l->end) {
			/* Total overlap: eliminate entry. */
			l->end = 0;
			l->start = 1;
		} else if (end >= l->start && end < l->end) {
			/* Start overlap: trim entry. */
			l->start = end + 1;
		} else if (start > l->start && start <= l->end) {
			/* End overlap: trim entry. */
			l->end = start-1;
		}
		/* Nothing left?  Remove it. */
		if (l->end < l->start) {
			memmove(l, l + 1, (--lock_num - i) * sizeof(l[0]));
			i--;
		}
	}

	if (type != F_UNLCK) {
		locks = realloc(locks, (lock_num + 1) * sizeof(*locks));
		l = &locks[lock_num++];
		l->fd = fd;
		l->start = start;
		l->end = end;
		l->type = type;
	}
	return locks;
}

/* We only trap this so we can dup fds in case we need to restore. */
int failtest_close(int fd)
{
	int new_fd = -1, i;

	if (fd < 0)
		return close(fd);

	/* Trace history to find source of fd, and if we need to cleanup
	 * writes. */
	for (i = history_num-1; i >= 0; i--) {
		switch (history[i].type) {
		case FAILTEST_WRITE:
			if (history[i].u.write.fd != fd)
				break;
			if (!history[i].cleanup)
				break;
			/* We need to save fd so we can restore file. */
			if (new_fd == -1)
				new_fd = dup(fd);
			history[i].u.write.dup_fd = new_fd;
			break;
		case FAILTEST_READ:
			/* We don't need to cleanup reads on closed fds. */
			if (history[i].u.read.fd != fd)
				break;
			history[i].cleanup = NULL;
			break;
		case FAILTEST_PIPE:
			/* From a pipe?  We don't ever restore pipes... */
			if (history[i].u.pipe.fds[0] == fd) {
				assert(new_fd == -1);
				history[i].u.pipe.closed[0] = true;
				goto out;
			}
			if (history[i].u.pipe.fds[1] == fd) {
				assert(new_fd == -1);
				history[i].u.pipe.closed[1] = true;
				goto out;
			}
			break;
		case FAILTEST_OPEN:
			if (history[i].u.open.ret == fd) {
				history[i].u.open.dup_fd = new_fd;
				goto out;
			}
			break;
		default:
			break;
		}
	}

out:
	locks = add_lock(locks, fd, 0, off_max(), F_UNLCK);
	return close(fd);
}

/* Zero length means "to end of file" */
static off_t end_of(off_t start, off_t len)
{
	if (len == 0)
		return off_max();
	return start + len - 1;
}

/* FIXME: This only handles locks, really. */
int failtest_fcntl(int fd, const char *file, unsigned line, int cmd, ...)
{
	struct failtest_call *p;
	struct fcntl_call call;
	va_list ap;

	call.fd = fd;
	call.cmd = cmd;

	/* Argument extraction. */
	switch (cmd) {
	case F_SETFL:
	case F_SETFD:
		va_start(ap, cmd);
		call.arg.l = va_arg(ap, long);
		va_end(ap);
		return fcntl(fd, cmd, call.arg.l);
	case F_GETFD:
	case F_GETFL:
		return fcntl(fd, cmd);
	case F_GETLK: {
		/* F_GETLK writes its answer into the struct flock, so copy
		 * it back to the caller. */
		struct flock *fl;
		int ret;

		get_locks();
		va_start(ap, cmd);
		fl = va_arg(ap, struct flock *);
		call.arg.fl = *fl;
		va_end(ap);
		ret = fcntl(fd, cmd, &call.arg.fl);
		*fl = call.arg.fl;
		return ret;
	}
	case F_SETLK:
	case F_SETLKW:
		va_start(ap, cmd);
		call.arg.fl = *va_arg(ap, struct flock *);
		va_end(ap);
		break;
	default:
		/* This means you need to implement it here. */
		errx(1, "failtest: unknown fcntl %u", cmd);
	}

	p = add_history(FAILTEST_FCNTL, file, line, &call);
	get_locks();

	if (should_fail(p)) {
		p->u.fcntl.ret = -1;
		if (p->u.fcntl.cmd == F_SETLK)
			p->error = EAGAIN;
		else
			p->error = EDEADLK;
	} else {
		p->u.fcntl.ret = fcntl(p->u.fcntl.fd, p->u.fcntl.cmd,
				       &p->u.fcntl.arg.fl);
		if (p->u.fcntl.ret == -1)
			p->error = errno;
		else {
			/* We don't handle anything else yet. */
			assert(p->u.fcntl.arg.fl.l_whence == SEEK_SET);
			locks = add_lock(locks,
					 p->u.fcntl.fd,
					 p->u.fcntl.arg.fl.l_start,
					 end_of(p->u.fcntl.arg.fl.l_start,
						p->u.fcntl.arg.fl.l_len),
					 p->u.fcntl.arg.fl.l_type);
		}
	}
	errno = p->error;
	return p->u.fcntl.ret;
}

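/*
 * Parse the failtest-specific command-line options:
 *   --failpath=<string>  replay a failure sequence printed by child_fail()
 *                        (e.g. --failpath=mcO; '+' means "continue normally
 *                        after the end of the path").
 *   --tracepath          trace every child to stderr and effectively
 *                        disable the poll timeout.
 */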
void failtest_init(int argc, char *argv[])
{
	unsigned int i;

	for (i = 1; i < argc; i++) {
		if (!strncmp(argv[i], "--failpath=", strlen("--failpath="))) {
			failpath = argv[i] + strlen("--failpath=");
		} else if (strcmp(argv[i], "--tracepath") == 0) {
			tracefd = dup(STDERR_FILENO);
			failtest_timeout_ms = -1;
		}
	}
	gettimeofday(&start, NULL);
}

/* Free up memory, so valgrind doesn't report leaks. */
static void free_everything(void)
{
	unsigned int i;

	/* We don't do this in cleanup: needed even for failed opens. */
	for (i = 0; i < history_num; i++) {
		if (history[i].type == FAILTEST_OPEN)
			free((char *)history[i].u.open.pathname);
	}
	free(history);
}

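/*
 * Tests call this instead of exit().  In a child it runs the optional
 * failtest_exit_check hook, undoes every recorded call's side effects in
 * reverse order (restoring file contents, closing fds, freeing memory),
 * tells the parent we survived the injected failure, and exits 0.
 */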
void failtest_exit(int status)
{
	int i;

	if (control_fd == -1) {
		free_everything();
		exit(status);
	}

	if (failtest_exit_check) {
		if (!failtest_exit_check(history, history_num))
			child_fail(NULL, 0, "failtest_exit_check failed\n");
	}

	/* Cleanup everything, in reverse order. */
	for (i = history_num - 1; i >= 0; i--)
		if (history[i].cleanup)
			history[i].cleanup(&history[i].u);

	free_everything();
	tell_parent(SUCCESS);
	exit(0);
}