 /*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell              1999-2005
   Copyright (C) Paul `Rusty' Russell              2000
   Copyright (C) Jeremy Allison                    2000-2003

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "private.h"
#include <assert.h>
#include <ccan/build_assert/build_assert.h>

/* If we were threaded, we could wait for unlock, but we're not, so fail. */
static enum TDB_ERROR owner_conflict(struct tdb_context *tdb, const char *call)
{
        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                          "%s: lock owned by another tdb in this process.",
                          call);
}

/* If we fork, we no longer really own the locks: fcntl locks are
 * per-process and are not inherited by the child.  Preserves errno. */
static bool check_lock_pid(struct tdb_context *tdb,
                           const char *call, bool log)
{
        /* No locks?  No problem! */
        if (tdb->file->allrecord_lock.count == 0
            && tdb->file->num_lockrecs == 0) {
                return true;
        }

        /* No fork?  No problem! */
        if (tdb->file->locker == getpid()) {
                return true;
        }

        if (log) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "%s: fork() detected after lock acquisition!"
                           " (%u vs %u)", call, tdb->file->locker, getpid());
        }
        return false;
}

int tdb_fcntl_lock(int fd, int rw, off_t off, off_t len, bool waitflag,
                   void *unused)
{
        struct flock fl;
        int ret;

        do {
                fl.l_type = rw;
                fl.l_whence = SEEK_SET;
                fl.l_start = off;
                fl.l_len = len;

                if (waitflag)
                        ret = fcntl(fd, F_SETLKW, &fl);
                else
                        ret = fcntl(fd, F_SETLK, &fl);
        } while (ret != 0 && errno == EINTR);
        return ret;
}

int tdb_fcntl_unlock(int fd, int rw, off_t off, off_t len, void *unused)
{
        struct flock fl;
        int ret;

        do {
                fl.l_type = F_UNLCK;
                fl.l_whence = SEEK_SET;
                fl.l_start = off;
                fl.l_len = len;

                ret = fcntl(fd, F_SETLKW, &fl);
        } while (ret != 0 && errno == EINTR);
        return ret;
}
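
/*
 * Illustrative sketch (not part of the library, hence #if 0): the helpers
 * above are plain fcntl() byte-range locks, presumably installed as
 * tdb->lock_fn/tdb->unlock_fn by default, and a caller-supplied pair with
 * the same signatures could stand in for them.  Here we simply take and
 * drop a one-byte read lock on an arbitrary fd; the file name and offset
 * are made-up examples.
 */
#if 0
static int example_fcntl_usage(void)
{
        int fd = open("example.tdb", O_RDWR);
        if (fd < 0)
                return -1;

        /* Blocking one-byte read lock at offset 64 (final argument is unused). */
        if (tdb_fcntl_lock(fd, F_RDLCK, 64, 1, true, NULL) != 0) {
                close(fd);
                return -1;
        }

        /* ... read the locked byte range here ... */

        /* Release exactly the range we locked. */
        tdb_fcntl_unlock(fd, F_RDLCK, 64, 1, NULL);
        close(fd);
        return 0;
}
#endif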

/* The low-level lock operation: note the locking pid the first time we
 * take any lock, so check_lock_pid() can detect a later fork(). */
static int lock(struct tdb_context *tdb,
                      int rw, off_t off, off_t len, bool waitflag)
{
        if (tdb->file->allrecord_lock.count == 0
            && tdb->file->num_lockrecs == 0) {
                tdb->file->locker = getpid();
        }

        tdb->stats.lock_lowlevel++;
        if (!waitflag)
                tdb->stats.lock_nonblock++;
        return tdb->lock_fn(tdb->file->fd, rw, off, len, waitflag,
                            tdb->lock_data);
}

static int unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
{
#if 0 /* Check they matched up locks and unlocks correctly. */
        char line[80];
        FILE *locks;
        bool found = false;

        locks = fopen("/proc/locks", "r");

        while (fgets(line, 80, locks)) {
                char *p;
                int type, start, l;

                /* e.g. 1: POSIX  ADVISORY  WRITE 2440 08:01:2180826 0 EOF */
                p = strchr(line, ':') + 1;
                if (strncmp(p, " POSIX  ADVISORY  ", strlen(" POSIX  ADVISORY  ")))
                        continue;
                p += strlen(" POSIX  ADVISORY  ");
                if (strncmp(p, "READ  ", strlen("READ  ")) == 0)
                        type = F_RDLCK;
                else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
                        type = F_WRLCK;
                else
                        abort();
                p += 6;
                if (atoi(p) != getpid())
                        continue;
                p = strchr(strchr(p, ' ') + 1, ' ') + 1;
                start = atoi(p);
                p = strchr(p, ' ') + 1;
                if (strncmp(p, "EOF", 3) == 0)
                        l = 0;
                else
                        l = atoi(p) - start + 1;

                if (off == start) {
                        if (len != l) {
                                fprintf(stderr, "Len %d should be %d: %s",
                                        (int)len, l, line);
                                abort();
                        }
                        if (type != rw) {
                                fprintf(stderr, "Type %s wrong: %s",
                                        rw == F_RDLCK ? "READ" : "WRITE", line);
                                abort();
                        }
                        found = true;
                        break;
                }
        }

        if (!found) {
                fprintf(stderr, "Unlock on %d@%d not found!",
                        (int)off, (int)len);
                abort();
        }

        fclose(locks);
#endif

        return tdb->unlock_fn(tdb->file->fd, rw, off, len, tdb->lock_data);
}


/* A byte-range locking function - returns TDB_SUCCESS on success.
   This function locks len bytes at the specified offset.

   Note that a len of zero means lock to end of file.
*/
static enum TDB_ERROR tdb_brlock(struct tdb_context *tdb,
                                 int rw_type, tdb_off_t offset, tdb_off_t len,
                                 enum tdb_lock_flags flags)
{
        int ret;

        if (tdb->flags & TDB_NOLOCK) {
                return TDB_SUCCESS;
        }

        if (rw_type == F_WRLCK && tdb->read_only) {
                return tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_LOG_USE_ERROR,
                                  "Write lock attempted on read-only database");
        }

        /* A 32 bit system cannot open a 64-bit file, but the file could have
         * expanded since we opened it: check for truncation here. */
        if ((size_t)(offset + len) != offset + len) {
                return tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
                                  "tdb_brlock: lock on giant offset %llu",
                                  (unsigned long long)(offset + len));
        }

        ret = lock(tdb, rw_type, offset, len, flags & TDB_LOCK_WAIT);
        if (ret != 0) {
                /* Generic lock error. errno set by fcntl.
                 * EAGAIN is an expected return from non-blocking
                 * locks. */
                if (!(flags & TDB_LOCK_PROBE)
                    && (errno != EAGAIN && errno != EINTR)) {
                        tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                   "tdb_brlock failed (fd=%d) at"
                                   " offset %zu rw_type=%d flags=%d len=%zu:"
                                   " %s",
                                   tdb->file->fd, (size_t)offset, rw_type,
                                   flags, (size_t)len, strerror(errno));
                }
                return TDB_ERR_LOCK;
        }
        return TDB_SUCCESS;
}

static enum TDB_ERROR tdb_brunlock(struct tdb_context *tdb,
                                   int rw_type, tdb_off_t offset, size_t len)
{
        int ret;

        if (tdb->flags & TDB_NOLOCK) {
                return TDB_SUCCESS;
        }

        ret = unlock(tdb, rw_type, offset, len);

        /* If we fail, *then* we verify that we owned the lock.  If not, ok. */
        if (ret == -1 && check_lock_pid(tdb, "tdb_brunlock", false)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_brunlock failed (fd=%d) at offset %zu"
                                  " rw_type=%d len=%zu: %s",
                                  tdb->file->fd, (size_t)offset, rw_type,
                                  (size_t)len, strerror(errno));
        }
        return TDB_SUCCESS;
}

/*
  Upgrade a read lock to a write lock.  This needs to be handled in a
  special way as some OSes (such as Solaris) have overly conservative
  deadlock detection and claim a deadlock when progress can be
  made.  For those OSes we may loop for a while.
*/
enum TDB_ERROR tdb_allrecord_upgrade(struct tdb_context *tdb)
{
        int count = 1000;

        if (!check_lock_pid(tdb, "tdb_transaction_prepare_commit", true))
                return TDB_ERR_LOCK;

        if (tdb->file->allrecord_lock.count != 1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_upgrade failed:"
                                  " count %u too high",
                                  tdb->file->allrecord_lock.count);
        }

        if (tdb->file->allrecord_lock.off != 1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_upgrade failed:"
                                  " already upgraded?");
        }

        if (tdb->file->allrecord_lock.owner != tdb) {
                return owner_conflict(tdb, "tdb_allrecord_upgrade");
        }

        while (count--) {
                struct timeval tv;
                if (tdb_brlock(tdb, F_WRLCK,
                               TDB_HASH_LOCK_START, 0,
                               TDB_LOCK_WAIT|TDB_LOCK_PROBE) == TDB_SUCCESS) {
                        tdb->file->allrecord_lock.ltype = F_WRLCK;
                        tdb->file->allrecord_lock.off = 0;
                        return TDB_SUCCESS;
                }
                if (errno != EDEADLK) {
                        break;
                }
                /* Sleep for as short a time as we can - more portable than usleep(). */
                tv.tv_sec = 0;
                tv.tv_usec = 1;
                select(0, NULL, NULL, NULL, &tv);
        }

        if (errno != EAGAIN && errno != EINTR)
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                           "tdb_allrecord_upgrade failed");
        return TDB_ERR_LOCK;
}


static struct tdb_lock *find_nestlock(struct tdb_context *tdb, tdb_off_t offset,
                                      const struct tdb_context *owner)
{
        unsigned int i;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off == offset) {
                        if (owner && tdb->file->lockrecs[i].owner != owner)
                                return NULL;
                        return &tdb->file->lockrecs[i];
                }
        }
        return NULL;
}

enum TDB_ERROR tdb_lock_and_recover(struct tdb_context *tdb)
{
        enum TDB_ERROR ecode;

        if (!check_lock_pid(tdb, "tdb_transaction_prepare_commit", true))
                return TDB_ERR_LOCK;

        ecode = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK,
                                   false);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        ecode = tdb_lock_open(tdb, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK);
        if (ecode != TDB_SUCCESS) {
                tdb_allrecord_unlock(tdb, F_WRLCK);
                return ecode;
        }
        ecode = tdb_transaction_recover(tdb);
        tdb_unlock_open(tdb);
        tdb_allrecord_unlock(tdb, F_WRLCK);

        return ecode;
}

/* Lock an offset in the database. */
static enum TDB_ERROR tdb_nest_lock(struct tdb_context *tdb,
                                    tdb_off_t offset, int ltype,
                                    enum tdb_lock_flags flags)
{
        struct tdb_lock *new_lck;
        enum TDB_ERROR ecode;

        if (offset > (TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
                      + tdb->file->map_size / 8)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_lock: invalid offset %zu ltype=%d",
                                  (size_t)offset, ltype);
        }

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        if (!check_lock_pid(tdb, "tdb_nest_lock", true)) {
                return TDB_ERR_LOCK;
        }

        tdb->stats.locks++;

        new_lck = find_nestlock(tdb, offset, NULL);
        if (new_lck) {
                if (new_lck->owner != tdb) {
                        return owner_conflict(tdb, "tdb_nest_lock");
                }

                if (new_lck->ltype == F_RDLCK && ltype == F_WRLCK) {
                        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                          "tdb_nest_lock:"
                                          " offset %zu has read lock",
                                          (size_t)offset);
                }
                /* Just increment the struct; POSIX locks don't stack. */
                new_lck->count++;
                return TDB_SUCCESS;
        }

#if 0
        if (tdb->file->num_lockrecs
            && offset >= TDB_HASH_LOCK_START
            && offset < TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_lock: already have a hash lock?");
        }
#endif

        new_lck = (struct tdb_lock *)realloc(
                tdb->file->lockrecs,
                sizeof(*tdb->file->lockrecs) * (tdb->file->num_lockrecs+1));
        if (new_lck == NULL) {
                return tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
                                  "tdb_nest_lock:"
                                  " unable to allocate %zu lock struct",
                                  (size_t)(tdb->file->num_lockrecs + 1));
        }
        tdb->file->lockrecs = new_lck;

        /* Since fcntl locks don't nest, we do a lock for the first one,
           and simply bump the count for future ones. */
        ecode = tdb_brlock(tdb, ltype, offset, 1, flags);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        /* First time we grab a lock, perhaps someone died in commit? */
        if (!(flags & TDB_LOCK_NOCHECK)
            && tdb->file->num_lockrecs == 0) {
                tdb_bool_err berr = tdb_needs_recovery(tdb);
                if (berr != false) {
                        tdb_brunlock(tdb, ltype, offset, 1);

                        if (berr < 0)
                                return berr;
                        ecode = tdb_lock_and_recover(tdb);
                        if (ecode == TDB_SUCCESS) {
                                ecode = tdb_brlock(tdb, ltype, offset, 1,
                                                   flags);
                        }
                        if (ecode != TDB_SUCCESS) {
                                return ecode;
                        }
                }
        }

        tdb->file->lockrecs[tdb->file->num_lockrecs].owner = tdb;
        tdb->file->lockrecs[tdb->file->num_lockrecs].off = offset;
        tdb->file->lockrecs[tdb->file->num_lockrecs].count = 1;
        tdb->file->lockrecs[tdb->file->num_lockrecs].ltype = ltype;
        tdb->file->num_lockrecs++;

        return TDB_SUCCESS;
}

static enum TDB_ERROR tdb_nest_unlock(struct tdb_context *tdb,
                                      tdb_off_t off, int ltype)
{
        struct tdb_lock *lck;
        enum TDB_ERROR ecode;

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        lck = find_nestlock(tdb, off, tdb);
        if ((lck == NULL) || (lck->count == 0)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_unlock: no lock for %zu",
                                  (size_t)off);
        }

        if (lck->count > 1) {
                lck->count--;
                return TDB_SUCCESS;
        }

        /*
         * This lock has count==1 left, so we need to unlock it in the
         * kernel. We don't bother with decrementing the in-memory array
         * element, we're about to overwrite it with the last array element
         * anyway.
         */
        ecode = tdb_brunlock(tdb, ltype, off, 1);

        /*
         * Shrink the array by overwriting the element just unlocked with the
         * last array element.
         */
        *lck = tdb->file->lockrecs[--tdb->file->num_lockrecs];

        return ecode;
}

/*
  get the transaction lock
 */
enum TDB_ERROR tdb_transaction_lock(struct tdb_context *tdb, int ltype)
{
        return tdb_nest_lock(tdb, TDB_TRANSACTION_LOCK, ltype, TDB_LOCK_WAIT);
}

/*
  release the transaction lock
 */
void tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_TRANSACTION_LOCK, ltype);
}

/* We only need to lock individual bytes, but Linux merges consecutive locks
 * so we lock in contiguous ranges. */
static enum TDB_ERROR tdb_lock_gradual(struct tdb_context *tdb,
                                       int ltype, enum tdb_lock_flags flags,
                                       tdb_off_t off, tdb_off_t len)
{
        enum TDB_ERROR ecode;
        enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);

        if (len <= 1) {
                /* 0 would mean to end-of-file... */
                assert(len != 0);
                /* Single hash.  Just do blocking lock. */
                return tdb_brlock(tdb, ltype, off, len, flags);
        }

        /* First we try non-blocking. */
        if (tdb_brlock(tdb, ltype, off, len, nb_flags) == TDB_SUCCESS) {
                return TDB_SUCCESS;
        }

        /* Try locking first half, then second. */
        ecode = tdb_lock_gradual(tdb, ltype, flags, off, len / 2);
        if (ecode != TDB_SUCCESS)
                return ecode;

        ecode = tdb_lock_gradual(tdb, ltype, flags,
                                 off + len / 2, len - len / 2);
        if (ecode != TDB_SUCCESS) {
                tdb_brunlock(tdb, ltype, off, len / 2);
        }
        return ecode;
}
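
/*
 * Illustrative sketch (not part of the library, hence #if 0): the same
 * divide-and-conquer idea as tdb_lock_gradual() above, expressed directly
 * on top of tdb_fcntl_lock() without the tdb_context plumbing.  Try the
 * whole range non-blocking; on contention, recurse on each half so we
 * never sit blocked waiting for the entire range at once.
 */
#if 0
static int example_lock_gradual(int fd, int rw, off_t off, off_t len)
{
        assert(len > 0);

        /* A single byte: just take the blocking lock. */
        if (len == 1)
                return tdb_fcntl_lock(fd, rw, off, len, true, NULL);

        /* Grab the whole range without blocking, if we can get it cheaply. */
        if (tdb_fcntl_lock(fd, rw, off, len, false, NULL) == 0)
                return 0;

        /* Otherwise lock the first half, then the second, recursively. */
        if (example_lock_gradual(fd, rw, off, len / 2) != 0)
                return -1;
        if (example_lock_gradual(fd, rw, off + len / 2, len - len / 2) != 0) {
                tdb_fcntl_unlock(fd, rw, off, len / 2, NULL);
                return -1;
        }
        return 0;
}
#endif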

/* Lock/unlock entire database.  It can only be upgradable if you have some
 * other way of guaranteeing exclusivity (i.e. the transaction write lock). */
enum TDB_ERROR tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
                                  enum tdb_lock_flags flags, bool upgradable)
{
        enum TDB_ERROR ecode;
        tdb_bool_err berr;

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        if (!check_lock_pid(tdb, "tdb_allrecord_lock", true)) {
                return TDB_ERR_LOCK;
        }

        if (tdb->file->allrecord_lock.count) {
                if (tdb->file->allrecord_lock.owner != tdb) {
                        return owner_conflict(tdb, "tdb_allrecord_lock");
                }

                if (ltype == F_RDLCK
                    || tdb->file->allrecord_lock.ltype == F_WRLCK) {
                        tdb->file->allrecord_lock.count++;
                        return TDB_SUCCESS;
                }

                /* A global lock of a different type exists. */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_allrecord_lock: already have %s lock",
                                  tdb->file->allrecord_lock.ltype == F_RDLCK
                                  ? "read" : "write");
        }

        if (tdb_has_hash_locks(tdb)) {
                /* Can't combine global and chain locks. */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_allrecord_lock:"
                                  " already have chain lock");
        }

        if (upgradable && ltype != F_RDLCK) {
                /* tdb error: you can't upgrade a write lock! */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_lock:"
                                  " can't upgrade a write lock");
        }

        tdb->stats.locks++;
again:
        /* Lock hashes, gradually. */
        ecode = tdb_lock_gradual(tdb, ltype, flags, TDB_HASH_LOCK_START,
                                 TDB_HASH_LOCK_RANGE);
        if (ecode != TDB_SUCCESS)
                return ecode;

        /* Lock free tables: from there to end of file. */
        ecode = tdb_brlock(tdb, ltype,
                           TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE,
                           0, flags);
        if (ecode != TDB_SUCCESS) {
                tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START,
                             TDB_HASH_LOCK_RANGE);
                return ecode;
        }

        tdb->file->allrecord_lock.owner = tdb;
        tdb->file->allrecord_lock.count = 1;
        /* If it's upgradable, it's actually exclusive so we can treat
         * it as a write lock. */
        tdb->file->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
        tdb->file->allrecord_lock.off = upgradable;

        /* Now check for needing recovery. */
        if (flags & TDB_LOCK_NOCHECK)
                return TDB_SUCCESS;

        berr = tdb_needs_recovery(tdb);
        if (likely(berr == false))
                return TDB_SUCCESS;

        tdb_allrecord_unlock(tdb, ltype);
        if (berr < 0)
                return berr;
        ecode = tdb_lock_and_recover(tdb);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }
        goto again;
}

enum TDB_ERROR tdb_lock_open(struct tdb_context *tdb, enum tdb_lock_flags flags)
{
        return tdb_nest_lock(tdb, TDB_OPEN_LOCK, F_WRLCK, flags);
}

void tdb_unlock_open(struct tdb_context *tdb)
{
        tdb_nest_unlock(tdb, TDB_OPEN_LOCK, F_WRLCK);
}

bool tdb_has_open_lock(struct tdb_context *tdb)
{
        return !(tdb->flags & TDB_NOLOCK)
                && find_nestlock(tdb, TDB_OPEN_LOCK, tdb) != NULL;
}

enum TDB_ERROR tdb_lock_expand(struct tdb_context *tdb, int ltype)
{
        /* Lock doesn't protect data, so don't check (we recurse if we do!) */
        return tdb_nest_lock(tdb, TDB_EXPANSION_LOCK, ltype,
                             TDB_LOCK_WAIT | TDB_LOCK_NOCHECK);
}

void tdb_unlock_expand(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_EXPANSION_LOCK, ltype);
}

/* unlock entire db */
void tdb_allrecord_unlock(struct tdb_context *tdb, int ltype)
{
        if (tdb->flags & TDB_NOLOCK)
                return;

        if (tdb->file->allrecord_lock.count == 0) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "tdb_allrecord_unlock: not locked!");
                return;
        }

        if (tdb->file->allrecord_lock.owner != tdb) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "tdb_allrecord_unlock: not locked by us!");
                return;
        }

        /* Upgradable locks are marked as write locks. */
        if (tdb->file->allrecord_lock.ltype != ltype
            && (!tdb->file->allrecord_lock.off || ltype != F_RDLCK)) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                           "tdb_allrecord_unlock: have %s lock",
                           tdb->file->allrecord_lock.ltype == F_RDLCK
                           ? "read" : "write");
                return;
        }

        if (tdb->file->allrecord_lock.count > 1) {
                tdb->file->allrecord_lock.count--;
                return;
        }

        tdb->file->allrecord_lock.count = 0;
        tdb->file->allrecord_lock.ltype = 0;

        tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START, 0);
}

bool tdb_has_expansion_lock(struct tdb_context *tdb)
{
        return find_nestlock(tdb, TDB_EXPANSION_LOCK, tdb) != NULL;
}

bool tdb_has_hash_locks(struct tdb_context *tdb)
{
        unsigned int i;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off >= TDB_HASH_LOCK_START
                    && tdb->file->lockrecs[i].off < (TDB_HASH_LOCK_START
                                                     + TDB_HASH_LOCK_RANGE))
                        return true;
        }
        return false;
}

static bool tdb_has_free_lock(struct tdb_context *tdb)
{
        unsigned int i;

        if (tdb->flags & TDB_NOLOCK)
                return false;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off
                    > TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE)
                        return true;
        }
        return false;
}

enum TDB_ERROR tdb_lock_hashes(struct tdb_context *tdb,
                               tdb_off_t hash_lock,
                               tdb_len_t hash_range,
                               int ltype, enum tdb_lock_flags waitflag)
{
        /* FIXME: Do this properly, using hlock_range */
        unsigned l = TDB_HASH_LOCK_START
                + (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));

        /* An allrecord lock allows us to avoid per-chain locks. */
        if (tdb->file->allrecord_lock.count) {
                if (!check_lock_pid(tdb, "tdb_lock_hashes", true))
                        return TDB_ERR_LOCK;

                if (tdb->file->allrecord_lock.owner != tdb)
                        return owner_conflict(tdb, "tdb_lock_hashes");
                if (ltype == tdb->file->allrecord_lock.ltype
                    || ltype == F_RDLCK) {
                        return TDB_SUCCESS;
                }

                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_lock_hashes:"
                                  " already have %s allrecordlock",
                                  tdb->file->allrecord_lock.ltype == F_RDLCK
                                  ? "read" : "write");
        }

        if (tdb_has_free_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_hashes: already have free lock");
        }

        if (tdb_has_expansion_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_hashes:"
                                  " already have expansion lock");
        }

        return tdb_nest_lock(tdb, l, ltype, waitflag);
}

enum TDB_ERROR tdb_unlock_hashes(struct tdb_context *tdb,
                                 tdb_off_t hash_lock,
                                 tdb_len_t hash_range, int ltype)
{
        unsigned l = TDB_HASH_LOCK_START
                + (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        /* An allrecord lock allows us to avoid per-chain locks. */
        if (tdb->file->allrecord_lock.count) {
                if (tdb->file->allrecord_lock.ltype == F_RDLCK
                    && ltype == F_WRLCK) {
                        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                          "tdb_unlock_hashes RO allrecord!");
                }
                return TDB_SUCCESS;
        }

        return tdb_nest_unlock(tdb, l, ltype);
}

/* Hash locks use TDB_HASH_LOCK_START + the next 30 bits.
 * Free-list locks begin after that; bucket offsets are sizeof(tdb_off_t)
 * apart, so we divide.  The result is that on 32 bit systems we don't use
 * lock values > 2^31 on files that are less than 4GB.
 */
static tdb_off_t free_lock_off(tdb_off_t b_off)
{
        return TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
                + b_off / sizeof(tdb_off_t);
}
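
/*
 * Worked example (illustrative numbers only, assuming an 8-byte tdb_off_t):
 * a free bucket at file offset b_off = 800 maps to lock offset
 * TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE + 100, i.e. past the end of the
 * hash-chain lock range, so free-bucket locks never collide with hash locks.
 */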

enum TDB_ERROR tdb_lock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off,
                                    enum tdb_lock_flags waitflag)
{
        assert(b_off >= sizeof(struct tdb_header));

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        /* An allrecord lock allows us to avoid per-chain locks. */
        if (tdb->file->allrecord_lock.count) {
                if (!check_lock_pid(tdb, "tdb_lock_free_bucket", true))
                        return TDB_ERR_LOCK;

                if (tdb->file->allrecord_lock.ltype == F_WRLCK)
                        return TDB_SUCCESS;
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_free_bucket with"
                                  " read-only allrecordlock!");
        }

#if 0 /* FIXME */
        if (tdb_has_expansion_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_free_bucket:"
                                  " already have expansion lock");
        }
#endif

        return tdb_nest_lock(tdb, free_lock_off(b_off), F_WRLCK, waitflag);
}

void tdb_unlock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off)
{
        if (tdb->file->allrecord_lock.count)
                return;

        tdb_nest_unlock(tdb, free_lock_off(b_off), F_WRLCK);
}

enum TDB_ERROR tdb_lockall(struct tdb_context *tdb)
{
        return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
}

void tdb_unlockall(struct tdb_context *tdb)
{
        tdb_allrecord_unlock(tdb, F_WRLCK);
}

enum TDB_ERROR tdb_lockall_read(struct tdb_context *tdb)
{
        return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
}

void tdb_unlockall_read(struct tdb_context *tdb)
{
        tdb_allrecord_unlock(tdb, F_RDLCK);
}
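
/*
 * Illustrative sketch (not part of the library, hence #if 0): how a caller
 * might use the public wrappers above to hold the allrecord lock across a
 * batch of operations on an already-open tdb.  The batch itself is elided;
 * only the lock/unlock pairing is the point.
 */
#if 0
static enum TDB_ERROR example_lockall_usage(struct tdb_context *tdb)
{
        enum TDB_ERROR ecode = tdb_lockall(tdb);
        if (ecode != TDB_SUCCESS)
                return ecode;

        /* ... reads and writes here proceed without per-chain locking ... */

        tdb_unlockall(tdb);
        return TDB_SUCCESS;
}
#endif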

/* Drop all locks this tdb context still holds on the file. */
void tdb_lock_cleanup(struct tdb_context *tdb)
{
        unsigned int i;

        while (tdb->file->allrecord_lock.count
               && tdb->file->allrecord_lock.owner == tdb) {
                tdb_allrecord_unlock(tdb, tdb->file->allrecord_lock.ltype);
        }

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].owner == tdb) {
                        tdb_nest_unlock(tdb,
                                        tdb->file->lockrecs[i].off,
                                        tdb->file->lockrecs[i].ltype);
                        i--;
                }
        }
}