/* We save the locks so we can reacquire them. */
#include <unistd.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdbool.h>
#include <errno.h>
#include <ccan/tap/tap.h>
#include <ccan/tdb2/private.h>
#include "lock-tracking.h"

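/*
 * Every byte-range lock we currently hold, kept as a singly-linked list
 * so lock and unlock calls can be cross-checked against each other.
 */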
struct lock {
	struct lock *next;
	unsigned int off;
	unsigned int len;
	int type;
};
static struct lock *locks;
int locking_errors = 0;			/* Mismatched or overlapping lock calls seen. */
bool suppress_lockcheck = false;	/* Don't report (or count) mismatches. */
bool nonblocking_locks;			/* Treat F_SETLKW requests as F_SETLK. */
int locking_would_block = 0;		/* Converted F_SETLKW calls which would have blocked. */
void (*unlock_callback)(int fd);	/* Called after each successful unlock. */

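/*
 * A drop-in fcntl() replacement which tracks F_SETLK/F_SETLKW requests.
 * A sketch of the usual interposition trick (the exact mechanism is up
 * to each test) is to shadow the real fcntl() before pulling in the
 * code under test:
 *
 *	#define fcntl fcntl_with_lockcheck
 */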
int fcntl_with_lockcheck(int fd, int cmd, ... /* arg */ )
{
	va_list ap;
	int ret, arg3;
	struct flock *fl;
	bool may_block = false;

	if (cmd != F_SETLK && cmd != F_SETLKW) {
		/* This may be totally bogus, but we don't know in general. */
		va_start(ap, cmd);
		arg3 = va_arg(ap, int);
		va_end(ap);

		return fcntl(fd, cmd, arg3);
	}

	va_start(ap, cmd);
	fl = va_arg(ap, struct flock *);
	va_end(ap);

	if (cmd == F_SETLKW && nonblocking_locks) {
		cmd = F_SETLK;
		may_block = true;
	}
	ret = fcntl(fd, cmd, fl);

	/* Detect when we failed, but might have been OK if we waited. */
	if (may_block && ret == -1 && (errno == EAGAIN || errno == EACCES)) {
		locking_would_block++;
	}

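	/* An unlock must exactly match a lock we recorded earlier. */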
	if (fl->l_type == F_UNLCK) {
		struct lock **l;
		struct lock *old = NULL;

		for (l = &locks; *l; l = &(*l)->next) {
			if ((*l)->off == fl->l_start
			    && (*l)->len == fl->l_len) {
				if (ret == 0) {
					old = *l;
					*l = (*l)->next;
					free(old);
				}
				break;
			}
		}
		if (!old && !suppress_lockcheck) {
			diag("Unknown unlock %u@%u - %i",
			     (unsigned int)fl->l_len,
			     (unsigned int)fl->l_start, ret);
			locking_errors++;
		}
	} else {
		struct lock *new, *i;
		unsigned int fl_end = fl->l_start + fl->l_len;
		if (fl->l_len == 0)
			fl_end = (unsigned int)-1;

		/* Check for overlaps: tdb should never take overlapping locks. */
		for (i = locks; i; i = i->next) {
			unsigned int i_end = i->off + i->len;
			if (i->len == 0)
				i_end = (unsigned int)-1;

			if (fl->l_start >= i->off && fl->l_start < i_end)
				break;
			if (fl_end > i->off && fl_end < i_end)
				break;

			/* tdb_allrecord_lock does this, handle adjacent: */
			if (fl->l_start > TDB_HASH_LOCK_START
			    && fl->l_start == i_end && fl->l_type == i->type) {
				if (ret == 0) {
					i->len = fl->l_len
						? i->len + fl->l_len
						: 0;
				}
				goto done;
			}
		}
		if (i) {
			/* Special case: upgrade of allrecord lock. */
			if (i->type == F_RDLCK && fl->l_type == F_WRLCK
			    && i->off == TDB_HASH_LOCK_START
			    && fl->l_start == TDB_HASH_LOCK_START
			    && i->len == 0
			    && fl->l_len == 0) {
				if (ret == 0)
					i->type = F_WRLCK;
				goto done;
			}
			if (!suppress_lockcheck) {
				diag("%s lock %u@%u overlaps %u@%u",
				     fl->l_type == F_WRLCK ? "write" : "read",
				     (unsigned int)fl->l_len,
				     (unsigned int)fl->l_start,
				     i->len, i->off);
				locking_errors++;
			}
		}

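		/* The lock succeeded: remember it. */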
		if (ret == 0) {
			new = malloc(sizeof *new);
			new->off = fl->l_start;
			new->len = fl->l_len;
			new->type = fl->l_type;
			new->next = locks;
			locks = new;
		}
	}
done:
	if (ret == 0 && fl->l_type == F_UNLCK && unlock_callback)
		unlock_callback(fd);
	return ret;
}

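/*
 * Throw away everything we know about held locks (without unlocking
 * anything), returning how many we were tracking.
 */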
unsigned int forget_locking(void)
{
	unsigned int num = 0;
	while (locks) {
		struct lock *next = locks->next;
		free(locks);
		locks = next;
		num++;
	}
	return num;
}
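
/*
 * A sketch of typical use in a test (hypothetical fragment, not taken
 * from this tree): after running tdb operations through the wrapper,
 * confirm the bookkeeping is clean using ok1() from ccan/tap:
 *
 *	ok1(locking_errors == 0);
 *	ok1(forget_locking() == 0);
 */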