/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell              1999-2005
   Copyright (C) Paul `Rusty' Russell              2000
   Copyright (C) Jeremy Allison                    2000-2003

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "private.h"
#include <assert.h>
#include <ccan/build_assert/build_assert.h>

/* If we were threaded, we could wait for unlock, but we're not, so fail. */
static enum TDB_ERROR owner_conflict(struct tdb_context *tdb, const char *call)
{
        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                          "%s: lock owned by another tdb in this process.",
                          call);
}

/* If we fork, we no longer really own locks. */
static bool check_lock_pid(struct tdb_context *tdb,
                           const char *call, bool log)
{
        /* No locks?  No problem! */
        if (tdb->file->allrecord_lock.count == 0
            && tdb->file->num_lockrecs == 0) {
                return true;
        }

        /* No fork?  No problem! */
        if (tdb->file->locker == getpid()) {
                return true;
        }

        if (log) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "%s: fork() detected after lock acquisition!"
                           " (%u vs %u)", call, tdb->file->locker, getpid());
        }
        return false;
}

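/*
 * tdb_fcntl_lock() and tdb_fcntl_unlock() below are the default byte-range
 * lock callbacks (plain fcntl locks, retried on EINTR).  In principle a
 * caller can substitute its own pair when opening the database; a minimal
 * sketch, assuming the tdb_attribute_flock layout declared in tdb2.h:
 *
 *      union tdb_attribute locking;
 *
 *      locking.base.attr = TDB_ATTRIBUTE_FLOCK;
 *      locking.flock.lock = my_lock_fn;     // same signature as tdb_fcntl_lock
 *      locking.flock.unlock = my_unlock_fn; // same signature as tdb_fcntl_unlock
 *      locking.flock.data = my_private;     // handed back as the final argument
 */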
int tdb_fcntl_lock(int fd, int rw, off_t off, off_t len, bool waitflag,
                   void *unused)
{
        struct flock fl;
        int ret;

        do {
                fl.l_type = rw;
                fl.l_whence = SEEK_SET;
                fl.l_start = off;
                fl.l_len = len;

                if (waitflag)
                        ret = fcntl(fd, F_SETLKW, &fl);
                else
                        ret = fcntl(fd, F_SETLK, &fl);
        } while (ret != 0 && errno == EINTR);
        return ret;
}

int tdb_fcntl_unlock(int fd, int rw, off_t off, off_t len, void *unused)
{
        struct flock fl;
        int ret;

        do {
                fl.l_type = F_UNLCK;
                fl.l_whence = SEEK_SET;
                fl.l_start = off;
                fl.l_len = len;

                ret = fcntl(fd, F_SETLKW, &fl);
        } while (ret != 0 && errno == EINTR);
        return ret;
}

static int lock(struct tdb_context *tdb,
                int rw, off_t off, off_t len, bool waitflag)
{
        int ret;
        if (tdb->file->allrecord_lock.count == 0
            && tdb->file->num_lockrecs == 0) {
                tdb->file->locker = getpid();
        }

        tdb->stats.lock_lowlevel++;
        ret = tdb->lock_fn(tdb->file->fd, rw, off, len, waitflag,
                           tdb->lock_data);
        if (!waitflag) {
                tdb->stats.lock_nonblock++;
                if (ret != 0)
                        tdb->stats.lock_nonblock_fail++;
        }
        return ret;
}

static int unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
{
#if 0 /* Check they matched up locks and unlocks correctly. */
        char line[80];
        FILE *locks;
        bool found = false;

        locks = fopen("/proc/locks", "r");

        while (fgets(line, 80, locks)) {
                char *p;
                int type, start, l;

                /* eg. 1: FLOCK  ADVISORY  WRITE 2440 08:01:2180826 0 EOF */
                p = strchr(line, ':') + 1;
                if (strncmp(p, " POSIX  ADVISORY  ", strlen(" POSIX  ADVISORY  ")))
                        continue;
                p += strlen(" FLOCK  ADVISORY  ");
                if (strncmp(p, "READ  ", strlen("READ  ")) == 0)
                        type = F_RDLCK;
                else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
                        type = F_WRLCK;
                else
                        abort();
                p += 6;
                if (atoi(p) != getpid())
                        continue;
                p = strchr(strchr(p, ' ') + 1, ' ') + 1;
                start = atoi(p);
                p = strchr(p, ' ') + 1;
                if (strncmp(p, "EOF", 3) == 0)
                        l = 0;
                else
                        l = atoi(p) - start + 1;

                if (off == start) {
                        if (len != l) {
                                fprintf(stderr, "Len %u should be %u: %s",
                                        (int)len, l, line);
                                abort();
                        }
                        if (type != rw) {
                                fprintf(stderr, "Type %s wrong: %s",
                                        rw == F_RDLCK ? "READ" : "WRITE", line);
                                abort();
                        }
                        found = true;
                        break;
                }
        }

        if (!found) {
                fprintf(stderr, "Unlock on %u@%u not found!",
                        (int)off, (int)len);
                abort();
        }

        fclose(locks);
#endif

        return tdb->unlock_fn(tdb->file->fd, rw, off, len, tdb->lock_data);
}

/* a byte range locking function - return 0 on success
   this function locks len bytes at the specified offset.

   note that a len of zero means lock to end of file
*/
static enum TDB_ERROR tdb_brlock(struct tdb_context *tdb,
                                 int rw_type, tdb_off_t offset, tdb_off_t len,
                                 enum tdb_lock_flags flags)
{
        int ret;

        if (tdb->flags & TDB_NOLOCK) {
                return TDB_SUCCESS;
        }

        if (rw_type == F_WRLCK && (tdb->flags & TDB_RDONLY)) {
                return tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_LOG_USE_ERROR,
                                  "Write lock attempted on read-only database");
        }

        /* A 32-bit system cannot open a 64-bit file, but the file could have
         * grown past 32 bits since we opened it: check for overflow here. */
        if ((size_t)(offset + len) != offset + len) {
                return tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
                                  "tdb_brlock: lock on giant offset %llu",
                                  (unsigned long long)(offset + len));
        }

        ret = lock(tdb, rw_type, offset, len, flags & TDB_LOCK_WAIT);
        if (ret != 0) {
                /* Generic lock error. errno set by fcntl.
                 * EAGAIN is an expected return from non-blocking
                 * locks. */
                if (!(flags & TDB_LOCK_PROBE)
                    && (errno != EAGAIN && errno != EINTR)) {
                        tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                   "tdb_brlock failed (fd=%d) at"
                                   " offset %zu rw_type=%d flags=%d len=%zu:"
                                   " %s",
                                   tdb->file->fd, (size_t)offset, rw_type,
                                   flags, (size_t)len, strerror(errno));
                }
                return TDB_ERR_LOCK;
        }
        return TDB_SUCCESS;
}

static enum TDB_ERROR tdb_brunlock(struct tdb_context *tdb,
                                   int rw_type, tdb_off_t offset, size_t len)
{
        if (tdb->flags & TDB_NOLOCK) {
                return TDB_SUCCESS;
        }

        if (!check_lock_pid(tdb, "tdb_brunlock", true))
                return TDB_ERR_LOCK;

        if (unlock(tdb, rw_type, offset, len) == -1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_brunlock failed (fd=%d) at offset %zu"
                                  " rw_type=%d len=%zu: %s",
                                  tdb->file->fd, (size_t)offset, rw_type,
                                  (size_t)len, strerror(errno));
        }
        return TDB_SUCCESS;
}

/*
  upgrade a read lock to a write lock. This needs to be handled in a
  special way as some OSes (such as Solaris) have too conservative
  deadlock detection and claim a deadlock when progress can be
  made. For those OSes we may loop for a while.
*/
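/*
 * Note on bookkeeping: tdb_allrecord_lock() records its "upgradable" argument
 * in allrecord_lock.off, so off == 1 here means "read lock, taken upgradable"
 * and off == 0 means either a plain lock or one that has already been
 * upgraded.  A successful upgrade re-takes the whole range as F_WRLCK,
 * flips ltype to F_WRLCK and clears that marker.
 */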
enum TDB_ERROR tdb_allrecord_upgrade(struct tdb_context *tdb)
{
        int count = 1000;

        if (!check_lock_pid(tdb, "tdb_transaction_prepare_commit", true))
                return TDB_ERR_LOCK;

        if (tdb->file->allrecord_lock.count != 1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_upgrade failed:"
                                  " count %u too high",
                                  tdb->file->allrecord_lock.count);
        }

        if (tdb->file->allrecord_lock.off != 1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_upgrade failed:"
                                  " already upgraded?");
        }

        if (tdb->file->allrecord_lock.owner != tdb) {
                return owner_conflict(tdb, "tdb_allrecord_upgrade");
        }

        while (count--) {
                struct timeval tv;
                if (tdb_brlock(tdb, F_WRLCK,
                               TDB_HASH_LOCK_START, 0,
                               TDB_LOCK_WAIT|TDB_LOCK_PROBE) == TDB_SUCCESS) {
                        tdb->file->allrecord_lock.ltype = F_WRLCK;
                        tdb->file->allrecord_lock.off = 0;
                        return TDB_SUCCESS;
                }
                if (errno != EDEADLK) {
                        break;
                }
                /* sleep for as short a time as we can - more portable than usleep() */
                tv.tv_sec = 0;
                tv.tv_usec = 1;
                select(0, NULL, NULL, NULL, &tv);
        }

        if (errno != EAGAIN && errno != EINTR)
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                           "tdb_allrecord_upgrade failed");
        return TDB_ERR_LOCK;
}

static struct tdb_lock *find_nestlock(struct tdb_context *tdb, tdb_off_t offset,
                                      const struct tdb_context *owner)
{
        unsigned int i;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off == offset) {
                        if (owner && tdb->file->lockrecs[i].owner != owner)
                                return NULL;
                        return &tdb->file->lockrecs[i];
                }
        }
        return NULL;
}

enum TDB_ERROR tdb_lock_and_recover(struct tdb_context *tdb)
{
        enum TDB_ERROR ecode;

        if (!check_lock_pid(tdb, "tdb_transaction_prepare_commit", true))
                return TDB_ERR_LOCK;

        ecode = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK,
                                   false);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        ecode = tdb_lock_open(tdb, F_WRLCK, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK);
        if (ecode != TDB_SUCCESS) {
                tdb_allrecord_unlock(tdb, F_WRLCK);
                return ecode;
        }
        ecode = tdb_transaction_recover(tdb);
        tdb_unlock_open(tdb, F_WRLCK);
        tdb_allrecord_unlock(tdb, F_WRLCK);

        return ecode;
}

/* lock an offset in the database. */
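/*
 * Nested locks are tracked per file in tdb->file->lockrecs: one entry per
 * locked offset, holding the owning tdb_context, the offset, the lock type
 * and a count.  Because fcntl locks don't nest, only the first request for
 * an offset actually hits the kernel; repeat requests just bump the count,
 * and tdb_nest_unlock() only drops the kernel lock when the last count goes.
 */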
static enum TDB_ERROR tdb_nest_lock(struct tdb_context *tdb,
                                    tdb_off_t offset, int ltype,
                                    enum tdb_lock_flags flags)
{
        struct tdb_lock *new_lck;
        enum TDB_ERROR ecode;

        if (offset > (TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
                      + tdb->file->map_size / 8)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_lock: invalid offset %zu ltype=%d",
                                  (size_t)offset, ltype);
        }

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        if (!check_lock_pid(tdb, "tdb_nest_lock", true)) {
                return TDB_ERR_LOCK;
        }

        tdb->stats.locks++;

        new_lck = find_nestlock(tdb, offset, NULL);
        if (new_lck) {
                if (new_lck->owner != tdb) {
                        return owner_conflict(tdb, "tdb_nest_lock");
                }

                if (new_lck->ltype == F_RDLCK && ltype == F_WRLCK) {
                        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                          "tdb_nest_lock:"
                                          " offset %zu has read lock",
                                          (size_t)offset);
                }
                /* Just increment the struct, posix locks don't stack. */
                new_lck->count++;
                return TDB_SUCCESS;
        }

#if 0
        if (tdb->file->num_lockrecs
            && offset >= TDB_HASH_LOCK_START
            && offset < TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_lock: already have a hash lock?");
        }
#endif

        new_lck = (struct tdb_lock *)realloc(
                tdb->file->lockrecs,
                sizeof(*tdb->file->lockrecs) * (tdb->file->num_lockrecs+1));
        if (new_lck == NULL) {
                return tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
                                  "tdb_nest_lock:"
                                  " unable to allocate %zu lock structs",
                                  (size_t)(tdb->file->num_lockrecs + 1));
        }
        tdb->file->lockrecs = new_lck;

        /* Since fcntl locks don't nest, we do a lock for the first one,
           and simply bump the count for future ones */
        ecode = tdb_brlock(tdb, ltype, offset, 1, flags);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        /* First time we grab a lock, perhaps someone died in commit? */
        if (!(flags & TDB_LOCK_NOCHECK)
            && tdb->file->num_lockrecs == 0) {
                tdb_bool_err berr = tdb_needs_recovery(tdb);
                if (berr != false) {
                        tdb_brunlock(tdb, ltype, offset, 1);

                        if (berr < 0)
                                return berr;
                        ecode = tdb_lock_and_recover(tdb);
                        if (ecode == TDB_SUCCESS) {
                                ecode = tdb_brlock(tdb, ltype, offset, 1,
                                                   flags);
                        }
                        if (ecode != TDB_SUCCESS) {
                                return ecode;
                        }
                }
        }

        tdb->file->lockrecs[tdb->file->num_lockrecs].owner = tdb;
        tdb->file->lockrecs[tdb->file->num_lockrecs].off = offset;
        tdb->file->lockrecs[tdb->file->num_lockrecs].count = 1;
        tdb->file->lockrecs[tdb->file->num_lockrecs].ltype = ltype;
        tdb->file->num_lockrecs++;

        return TDB_SUCCESS;
}

static enum TDB_ERROR tdb_nest_unlock(struct tdb_context *tdb,
                                      tdb_off_t off, int ltype)
{
        struct tdb_lock *lck;
        enum TDB_ERROR ecode;

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        lck = find_nestlock(tdb, off, tdb);
        if ((lck == NULL) || (lck->count == 0)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_unlock: no lock for %zu",
                                  (size_t)off);
        }

        if (lck->count > 1) {
                lck->count--;
                return TDB_SUCCESS;
        }

        /*
         * This lock has count==1 left, so we need to unlock it in the
         * kernel. We don't bother with decrementing the in-memory array
         * element, we're about to overwrite it with the last array element
         * anyway.
         */
        ecode = tdb_brunlock(tdb, ltype, off, 1);

        /*
         * Shrink the array by overwriting the element just unlocked with the
         * last array element.
         */
        *lck = tdb->file->lockrecs[--tdb->file->num_lockrecs];

        return ecode;
}

/*
  get the transaction lock
 */
enum TDB_ERROR tdb_transaction_lock(struct tdb_context *tdb, int ltype)
{
        return tdb_nest_lock(tdb, TDB_TRANSACTION_LOCK, ltype, TDB_LOCK_WAIT);
}

/*
  release the transaction lock
 */
void tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_TRANSACTION_LOCK, ltype);
}

/* We only need to lock individual bytes, but Linux merges consecutive locks
 * so we lock in contiguous ranges. */
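/*
 * Strategy: first try to take the whole range without blocking.  If that
 * fails, split the range in two and lock each half the same way (so we only
 * ever block on sub-ranges), releasing the first half if the second can't be
 * had.  The idea being to keep making incremental progress on a large range
 * even while other processes are busily taking and dropping individual chain
 * locks, rather than waiting for the entire range to be free at once.
 */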
static enum TDB_ERROR tdb_lock_gradual(struct tdb_context *tdb,
                                       int ltype, enum tdb_lock_flags flags,
                                       tdb_off_t off, tdb_off_t len)
{
        enum TDB_ERROR ecode;
        enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);

        if (len <= 1) {
                /* 0 would mean to end-of-file... */
                assert(len != 0);
                /* Single hash.  Just do blocking lock. */
                return tdb_brlock(tdb, ltype, off, len, flags);
        }

        /* First we try non-blocking. */
        ecode = tdb_brlock(tdb, ltype, off, len, nb_flags);
        if (ecode != TDB_ERR_LOCK) {
                return ecode;
        }

        /* Try locking first half, then second. */
        ecode = tdb_lock_gradual(tdb, ltype, flags, off, len / 2);
        if (ecode != TDB_SUCCESS)
                return ecode;

        ecode = tdb_lock_gradual(tdb, ltype, flags,
                                 off + len / 2, len - len / 2);
        if (ecode != TDB_SUCCESS) {
                tdb_brunlock(tdb, ltype, off, len / 2);
        }
        return ecode;
}

/* lock/unlock entire database.  It can only be upgradable if you have some
 * other way of guaranteeing exclusivity (i.e. transaction write lock). */
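/*
 * The all-record lock covers two regions: the hash-lock range starting at
 * TDB_HASH_LOCK_START (taken gradually, see above), and everything from the
 * end of that range to end-of-file (free tables and data), taken in one go
 * with len 0 meaning "to EOF".  Unless TDB_LOCK_NOCHECK is given, we then
 * check whether a crashed transaction needs recovery before returning.
 */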
enum TDB_ERROR tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
                                  enum tdb_lock_flags flags, bool upgradable)
{
        enum TDB_ERROR ecode;
        tdb_bool_err berr;

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        if (!check_lock_pid(tdb, "tdb_allrecord_lock", true)) {
                return TDB_ERR_LOCK;
        }

        if (tdb->file->allrecord_lock.count) {
                if (tdb->file->allrecord_lock.owner != tdb) {
                        return owner_conflict(tdb, "tdb_allrecord_lock");
                }

                if (ltype == F_RDLCK
                    || tdb->file->allrecord_lock.ltype == F_WRLCK) {
                        tdb->file->allrecord_lock.count++;
                        return TDB_SUCCESS;
                }

                /* a global lock of a different type exists */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_allrecord_lock: already have %s lock",
                                  tdb->file->allrecord_lock.ltype == F_RDLCK
                                  ? "read" : "write");
        }

        if (tdb_has_hash_locks(tdb)) {
                /* can't combine global and chain locks */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_allrecord_lock:"
                                  " already have chain lock");
        }

        if (upgradable && ltype != F_RDLCK) {
                /* tdb error: you can't upgrade a write lock! */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_lock:"
                                  " can't upgrade a write lock");
        }

        tdb->stats.locks++;
again:
        /* Lock hashes, gradually. */
        ecode = tdb_lock_gradual(tdb, ltype, flags, TDB_HASH_LOCK_START,
                                 TDB_HASH_LOCK_RANGE);
        if (ecode != TDB_SUCCESS)
                return ecode;

        /* Lock free tables: there to end of file. */
        ecode = tdb_brlock(tdb, ltype,
                           TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE,
                           0, flags);
        if (ecode != TDB_SUCCESS) {
                tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START,
                             TDB_HASH_LOCK_RANGE);
                return ecode;
        }

        tdb->file->allrecord_lock.owner = tdb;
        tdb->file->allrecord_lock.count = 1;
        /* If it's upgradable, it's actually exclusive so we can treat
         * it as a write lock. */
        tdb->file->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
        tdb->file->allrecord_lock.off = upgradable;

        /* Now check for needing recovery. */
        if (flags & TDB_LOCK_NOCHECK)
                return TDB_SUCCESS;

        berr = tdb_needs_recovery(tdb);
        if (likely(berr == false))
                return TDB_SUCCESS;

        tdb_allrecord_unlock(tdb, ltype);
        if (berr < 0)
                return berr;
        ecode = tdb_lock_and_recover(tdb);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }
        goto again;
}

enum TDB_ERROR tdb_lock_open(struct tdb_context *tdb,
                             int ltype, enum tdb_lock_flags flags)
{
        return tdb_nest_lock(tdb, TDB_OPEN_LOCK, ltype, flags);
}

void tdb_unlock_open(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_OPEN_LOCK, ltype);
}

bool tdb_has_open_lock(struct tdb_context *tdb)
{
        return !(tdb->flags & TDB_NOLOCK)
                && find_nestlock(tdb, TDB_OPEN_LOCK, tdb) != NULL;
}

enum TDB_ERROR tdb_lock_expand(struct tdb_context *tdb, int ltype)
{
        /* Lock doesn't protect data, so don't check (we recurse if we do!) */
        return tdb_nest_lock(tdb, TDB_EXPANSION_LOCK, ltype,
                             TDB_LOCK_WAIT | TDB_LOCK_NOCHECK);
}

void tdb_unlock_expand(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_EXPANSION_LOCK, ltype);
}

/* unlock entire db */
void tdb_allrecord_unlock(struct tdb_context *tdb, int ltype)
{
        if (tdb->flags & TDB_NOLOCK)
                return;

        if (tdb->file->allrecord_lock.count == 0) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "tdb_allrecord_unlock: not locked!");
                return;
        }

        if (tdb->file->allrecord_lock.owner != tdb) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "tdb_allrecord_unlock: not locked by us!");
                return;
        }

        /* Upgradable locks are marked as write locks. */
        if (tdb->file->allrecord_lock.ltype != ltype
            && (!tdb->file->allrecord_lock.off || ltype != F_RDLCK)) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                           "tdb_allrecord_unlock: have %s lock",
                           tdb->file->allrecord_lock.ltype == F_RDLCK
                           ? "read" : "write");
                return;
        }

        if (tdb->file->allrecord_lock.count > 1) {
                tdb->file->allrecord_lock.count--;
                return;
        }

        tdb->file->allrecord_lock.count = 0;
        tdb->file->allrecord_lock.ltype = 0;

        tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START, 0);
}

bool tdb_has_expansion_lock(struct tdb_context *tdb)
{
        return find_nestlock(tdb, TDB_EXPANSION_LOCK, tdb) != NULL;
}

bool tdb_has_hash_locks(struct tdb_context *tdb)
{
        unsigned int i;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off >= TDB_HASH_LOCK_START
                    && tdb->file->lockrecs[i].off < (TDB_HASH_LOCK_START
                                                     + TDB_HASH_LOCK_RANGE))
                        return true;
        }
        return false;
}

static bool tdb_has_free_lock(struct tdb_context *tdb)
{
        unsigned int i;

        if (tdb->flags & TDB_NOLOCK)
                return false;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off
                    > TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE)
                        return true;
        }
        return false;
}

enum TDB_ERROR tdb_lock_hashes(struct tdb_context *tdb,
                               tdb_off_t hash_lock,
                               tdb_len_t hash_range,
                               int ltype, enum tdb_lock_flags waitflag)
{
        /* FIXME: Do this properly, using hlock_range */
        unsigned l = TDB_HASH_LOCK_START
                + (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));

        /* An allrecord lock allows us to avoid per-chain locks. */
        if (tdb->file->allrecord_lock.count) {
                if (!check_lock_pid(tdb, "tdb_lock_hashes", true))
                        return TDB_ERR_LOCK;

                if (tdb->file->allrecord_lock.owner != tdb)
                        return owner_conflict(tdb, "tdb_lock_hashes");
                if (ltype == tdb->file->allrecord_lock.ltype
                    || ltype == F_RDLCK) {
                        return TDB_SUCCESS;
                }

                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_lock_hashes:"
                                  " already have %s allrecord lock",
                                  tdb->file->allrecord_lock.ltype == F_RDLCK
                                  ? "read" : "write");
        }

        if (tdb_has_free_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_hashes: already have free lock");
        }

        if (tdb_has_expansion_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_hashes:"
                                  " already have expansion lock");
        }

        return tdb_nest_lock(tdb, l, ltype, waitflag);
}

enum TDB_ERROR tdb_unlock_hashes(struct tdb_context *tdb,
                                 tdb_off_t hash_lock,
                                 tdb_len_t hash_range, int ltype)
{
        unsigned l = TDB_HASH_LOCK_START
                + (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        /* An allrecord lock allows us to avoid per-chain locks. */
        if (tdb->file->allrecord_lock.count) {
                if (tdb->file->allrecord_lock.ltype == F_RDLCK
                    && ltype == F_WRLCK) {
                        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                          "tdb_unlock_hashes RO allrecord!");
                }
                return TDB_SUCCESS;
        }

        return tdb_nest_unlock(tdb, l, ltype);
}

/* Hash locks use TDB_HASH_LOCK_START + the next 30 bits.
 * Then we begin; bucket offsets are sizeof(tdb_off_t) apart, so we divide.
 * The result is that on 32 bit systems we don't use lock values > 2^31 on
 * files that are less than 4GB.
 */
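/*
 * In other words, the free-list bucket stored at file offset b_off is guarded
 * by the single lock byte at TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
 * + b_off / sizeof(tdb_off_t), which keeps free-list lock offsets safely
 * above the hash-lock range.
 */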
static tdb_off_t free_lock_off(tdb_off_t b_off)
{
        return TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
                + b_off / sizeof(tdb_off_t);
}

enum TDB_ERROR tdb_lock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off,
                                    enum tdb_lock_flags waitflag)
{
        assert(b_off >= sizeof(struct tdb_header));

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        /* An allrecord lock allows us to avoid per-chain locks. */
        if (tdb->file->allrecord_lock.count) {
                if (!check_lock_pid(tdb, "tdb_lock_free_bucket", true))
                        return TDB_ERR_LOCK;

                if (tdb->file->allrecord_lock.ltype == F_WRLCK)
                        return TDB_SUCCESS;
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_free_bucket with"
                                  " read-only allrecord lock!");
        }

#if 0 /* FIXME */
        if (tdb_has_expansion_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_free_bucket:"
                                  " already have expansion lock");
        }
#endif

        return tdb_nest_lock(tdb, free_lock_off(b_off), F_WRLCK, waitflag);
}

void tdb_unlock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off)
{
        if (tdb->file->allrecord_lock.count)
                return;

        tdb_nest_unlock(tdb, free_lock_off(b_off), F_WRLCK);
}

enum TDB_ERROR tdb_lockall(struct tdb_context *tdb)
{
        return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
}

void tdb_unlockall(struct tdb_context *tdb)
{
        tdb_allrecord_unlock(tdb, F_WRLCK);
}

enum TDB_ERROR tdb_lockall_read(struct tdb_context *tdb)
{
        return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
}

void tdb_unlockall_read(struct tdb_context *tdb)
{
        tdb_allrecord_unlock(tdb, F_RDLCK);
}

void tdb_lock_cleanup(struct tdb_context *tdb)
{
        unsigned int i;

        /* We don't want to warn: they're allowed to close tdb after fork. */
        if (!check_lock_pid(tdb, "tdb_close", false))
                return;

        while (tdb->file->allrecord_lock.count
               && tdb->file->allrecord_lock.owner == tdb) {
                tdb_allrecord_unlock(tdb, tdb->file->allrecord_lock.ltype);
        }

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].owner == tdb) {
                        tdb_nest_unlock(tdb,
                                        tdb->file->lockrecs[i].off,
                                        tdb->file->lockrecs[i].ltype);
                        /* Unlocking moved the last entry into slot i:
                         * revisit it. */
                        i--;
                }
        }
}
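
/*
 * Example (a sketch, not part of the library): hold the all-record write
 * lock around a batch of operations on an already-open tdb, using the
 * wrappers defined above:
 *
 *      if (tdb_lockall(tdb) == TDB_SUCCESS) {
 *              ... reads and writes here see a stable database ...
 *              tdb_unlockall(tdb);
 *      }
 *
 * tdb_lockall_read()/tdb_unlockall_read() do the same with a shared
 * (read) lock.
 */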