f6e97138e8b7f07b5b00336a9d7a0ed834d045d9
[ccan] / ccan / tdb / test / lock-tracking.c
/* We save the locks so we can reacquire them. */
2 #include <unistd.h>
3 #include <fcntl.h>
4 #include <stdarg.h>
5 #include <stdlib.h>
6 #include <ccan/tap/tap.h>
7 #include <ccan/tdb/tdb_private.h>
8
/* One tracked byte-range lock, kept in a singly-linked list headed by
 * "locks" below.  Mirrors the flock ranges passed to fcntl(). */
struct lock {
	struct lock *next;	/* next tracked lock, or NULL */
	unsigned int off;	/* start offset of the locked range */
	unsigned int len;	/* range length; 0 means "to end of file" */
	int type;		/* F_RDLCK or F_WRLCK */
};
/* Head of the list of locks we believe are currently held. */
static struct lock *locks;
/* Incremented whenever an inconsistent lock/unlock is detected. */
int locking_errors = 0;
/* When true, lock-consistency problems are not reported as errors. */
bool suppress_lockcheck = false;
/* When true, F_SETLKW is downgraded to non-blocking F_SETLK. */
bool nonblocking_locks;
/* Counts downgraded lock attempts that would have blocked. */
int locking_would_block = 0;
/* Optional hook invoked after each successful unlock. */
void (*unlock_callback)(int fd);
21
22 int fcntl_with_lockcheck(int fd, int cmd, ... /* arg */ )
23 {
24         va_list ap;
25         int ret, arg3;
26         struct flock *fl;
27         bool may_block = false;
28
29         if (cmd != F_SETLK && cmd != F_SETLKW) {
30                 /* This may be totally bogus, but we don't know in general. */
31                 va_start(ap, cmd);
32                 arg3 = va_arg(ap, int);
33                 va_end(ap);
34
35                 return fcntl(fd, cmd, arg3);
36         }
37
38         va_start(ap, cmd);
39         fl = va_arg(ap, struct flock *);
40         va_end(ap);
41
42         if (cmd == F_SETLKW && nonblocking_locks) {
43                 cmd = F_SETLK;
44                 may_block = true;
45         }
46         ret = fcntl(fd, cmd, fl);
47
48         /* Detect when we failed, but might have been OK if we waited. */
49         if (may_block && ret == -1 && (errno == EAGAIN || errno == EACCES)) {
50                 locking_would_block++;
51         }
52
53         if (fl->l_type == F_UNLCK) {
54                 struct lock **l;
55                 struct lock *old = NULL;
56                         
57                 for (l = &locks; *l; l = &(*l)->next) {
58                         if ((*l)->off == fl->l_start
59                             && (*l)->len == fl->l_len) {
60                                 if (ret == 0) {
61                                         old = *l;
62                                         *l = (*l)->next;
63                                         free(old);
64                                 }
65                                 break;
66                         }
67                 }
68                 if (!old && !suppress_lockcheck) {
69                         diag("Unknown unlock %u@%u - %i",
70                              (int)fl->l_len, (int)fl->l_start, ret);
71                         locking_errors++;
72                 }
73         } else {
74                 struct lock *new, *i;
75                 unsigned int fl_end = fl->l_start + fl->l_len;
76                 if (fl->l_len == 0)
77                         fl_end = (unsigned int)-1;
78
79                 /* Check for overlaps: we shouldn't do this. */
80                 for (i = locks; i; i = i->next) {
81                         unsigned int i_end = i->off + i->len;
82                         if (i->len == 0)
83                                 i_end = (unsigned int)-1;
84
85                         if (fl->l_start >= i->off && fl->l_start < i_end)
86                                 break;
87                         if (fl_end >= i->off && fl_end < i_end)
88                                 break;
89
90                         /* tdb_allrecord_lock does this, handle adjacent: */
91                         if (fl->l_start == i_end && fl->l_type == i->type) {
92                                 if (ret == 0) {
93                                         i->len = fl->l_len 
94                                                 ? i->len + fl->l_len
95                                                 : 0;
96                                 }
97                                 goto done;
98                         }
99                 }
100                 if (i) {
101                         /* Special case: upgrade of allrecord lock. */
102                         if (i->type == F_RDLCK && fl->l_type == F_WRLCK
103                             && i->off == FREELIST_TOP
104                             && fl->l_start == FREELIST_TOP
105                             && i->len == 0
106                             && fl->l_len == 0) {
107                                 if (ret == 0)
108                                         i->type = F_WRLCK;
109                                 goto done;
110                         }
111                         if (!suppress_lockcheck) {
112                                 diag("%s lock %u@%u overlaps %u@%u",
113                                      fl->l_type == F_WRLCK ? "write" : "read",
114                                      (int)fl->l_len, (int)fl->l_start,
115                                      i->len, (int)i->off);
116                                 locking_errors++;
117                         }
118                 }
119
120                 if (ret == 0) {
121                         new = malloc(sizeof *new);
122                         new->off = fl->l_start;
123                         new->len = fl->l_len;
124                         new->type = fl->l_type;
125                         new->next = locks;
126                         locks = new;
127                 }
128         }
129 done:
130         if (ret == 0 && fl->l_type == F_UNLCK && unlock_callback)
131                 unlock_callback(fd);
132         return ret;
133 }
134
135 int forget_locking(void)
136 {
137         unsigned int num = 0;
138         while (locks) {
139                 struct lock *next = locks->next;
140                 free(locks);
141                 locks = next;
142                 num++;
143         }
144         return num;
145 }