/* ccan/tdb2/lock.c */
/*
  Unix SMB/CIFS implementation.

  trivial database library

  Copyright (C) Andrew Tridgell              1999-2005
  Copyright (C) Paul `Rusty' Russell              2000
  Copyright (C) Jeremy Allison                    2000-2003

    ** NOTE! The following LGPL license applies to the tdb
    ** library. This does NOT imply that all of Samba is released
    ** under the LGPL

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 3 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "private.h"
#include <assert.h>
#include <ccan/build_assert/build_assert.h>

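/* Thin wrapper around fcntl(2) byte-range locking: rw is F_RDLCK or F_WRLCK,
 * and waitflag chooses a blocking F_SETLKW over a non-blocking F_SETLK. */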
static int fcntl_lock(struct tdb_context *tdb,
                      int rw, off_t off, off_t len, bool waitflag)
{
        struct flock fl;

        fl.l_type = rw;
        fl.l_whence = SEEK_SET;
        fl.l_start = off;
        fl.l_len = len;
        fl.l_pid = 0;

        add_stat(tdb, lock_lowlevel, 1);
        if (waitflag)
                return fcntl(tdb->fd, F_SETLKW, &fl);
        else {
                add_stat(tdb, lock_nonblock, 1);
                return fcntl(tdb->fd, F_SETLK, &fl);
        }
}

static int fcntl_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
{
        struct flock fl;
#if 0 /* Check they matched up locks and unlocks correctly. */
        char line[80];
        FILE *locks;
        bool found = false;

        locks = fopen("/proc/locks", "r");

        while (fgets(line, 80, locks)) {
                char *p;
                int type, start, l;

                /* eg. 1: POSIX  ADVISORY  WRITE 2440 08:01:2180826 0 EOF */
                p = strchr(line, ':') + 1;
                if (strncmp(p, " POSIX  ADVISORY  ", strlen(" POSIX  ADVISORY  ")))
                        continue;
                p += strlen(" POSIX  ADVISORY  ");
                if (strncmp(p, "READ  ", strlen("READ  ")) == 0)
                        type = F_RDLCK;
                else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
                        type = F_WRLCK;
                else
                        abort();
                p += 6;
                if (atoi(p) != getpid())
                        continue;
                p = strchr(strchr(p, ' ') + 1, ' ') + 1;
                start = atoi(p);
                p = strchr(p, ' ') + 1;
                if (strncmp(p, "EOF", 3) == 0)
                        l = 0;
                else
                        l = atoi(p) - start + 1;

                if (off == start) {
                        if (len != l) {
                                fprintf(stderr, "Len %u should be %u: %s",
                                        (int)len, l, line);
                                abort();
                        }
                        if (type != rw) {
                                fprintf(stderr, "Type %s wrong: %s",
                                        rw == F_RDLCK ? "READ" : "WRITE", line);
                                abort();
                        }
                        found = true;
                        break;
                }
        }

        if (!found) {
                fprintf(stderr, "Unlock on %u@%u not found!",
                        (int)off, (int)len);
                abort();
        }

        fclose(locks);
#endif

        fl.l_type = F_UNLCK;
        fl.l_whence = SEEK_SET;
        fl.l_start = off;
        fl.l_len = len;
        fl.l_pid = 0;

        return fcntl(tdb->fd, F_SETLKW, &fl);
}

/* A byte-range locking function - returns TDB_SUCCESS on success.
   This function locks/unlocks the given number of bytes at the specified
   offset.

   Note that a len of zero means lock to end of file.
*/
static enum TDB_ERROR tdb_brlock(struct tdb_context *tdb,
                                 int rw_type, tdb_off_t offset, tdb_off_t len,
                                 enum tdb_lock_flags flags)
{
        int ret;

        if (tdb->flags & TDB_NOLOCK) {
                return TDB_SUCCESS;
        }

        if (rw_type == F_WRLCK && tdb->read_only) {
                return tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_LOG_USE_ERROR,
                                  "Write lock attempted on read-only database");
        }

        /* A 32 bit system cannot open a 64-bit file, but it could have
         * expanded since then: check here. */
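        /* (For example, offset + len == 1ULL << 33 does not survive the cast
         * through a 32-bit size_t, so the check below rejects it.) */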
        if ((size_t)(offset + len) != offset + len) {
                return tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
                                  "tdb_brlock: lock on giant offset %llu",
                                  (unsigned long long)(offset + len));
        }

        do {
                ret = fcntl_lock(tdb, rw_type, offset, len,
                                 flags & TDB_LOCK_WAIT);
        } while (ret == -1 && errno == EINTR);

        if (ret == -1) {
                /* Generic lock error. errno set by fcntl.
                 * EAGAIN is an expected return from non-blocking
                 * locks. */
                if (!(flags & TDB_LOCK_PROBE) && errno != EAGAIN) {
                        tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                   "tdb_brlock failed (fd=%d) at"
                                   " offset %zu rw_type=%d flags=%d len=%zu:"
                                   " %s",
                                   tdb->fd, (size_t)offset, rw_type,
                                   flags, (size_t)len, strerror(errno));
                }
                return TDB_ERR_LOCK;
        }
        return TDB_SUCCESS;
}

static enum TDB_ERROR tdb_brunlock(struct tdb_context *tdb,
                                   int rw_type, tdb_off_t offset, size_t len)
{
        int ret;

        if (tdb->flags & TDB_NOLOCK) {
                return TDB_SUCCESS;
        }

        do {
                ret = fcntl_unlock(tdb, rw_type, offset, len);
        } while (ret == -1 && errno == EINTR);

        if (ret == -1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_brunlock failed (fd=%d) at offset %zu"
                                  " rw_type=%d len=%zu",
                                  tdb->fd, (size_t)offset, rw_type,
                                  (size_t)len);
        }
        return TDB_SUCCESS;
}
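
#if 0
/* Example (illustrative sketch only, not built): how the two wrappers above
 * are typically paired.  A single byte is locked at a given offset; a len of
 * 0 would instead cover from that offset to end-of-file. */
static enum TDB_ERROR example_lock_one_byte(struct tdb_context *tdb,
                                            tdb_off_t off)
{
        enum TDB_ERROR ecode;

        /* Take a blocking write lock on one byte... */
        ecode = tdb_brlock(tdb, F_WRLCK, off, 1, TDB_LOCK_WAIT);
        if (ecode != TDB_SUCCESS)
                return ecode;

        /* ...and release it again. */
        return tdb_brunlock(tdb, F_WRLCK, off, 1);
}
#endif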

/*
  upgrade a read lock to a write lock. This needs to be handled in a
  special way as some OSes (such as Solaris) have overly conservative
  deadlock detection and claim a deadlock even when progress can be
  made. For those OSes we may loop for a while.
*/
enum TDB_ERROR tdb_allrecord_upgrade(struct tdb_context *tdb)
{
        int count = 1000;

        if (tdb->allrecord_lock.count != 1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_upgrade failed:"
                                  " count %u too high",
                                  tdb->allrecord_lock.count);
        }

        if (tdb->allrecord_lock.off != 1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_upgrade failed:"
                                  " already upgraded?");
        }

        while (count--) {
                struct timeval tv;
                if (tdb_brlock(tdb, F_WRLCK,
                               TDB_HASH_LOCK_START, 0,
                               TDB_LOCK_WAIT|TDB_LOCK_PROBE) == TDB_SUCCESS) {
                        tdb->allrecord_lock.ltype = F_WRLCK;
                        tdb->allrecord_lock.off = 0;
                        return TDB_SUCCESS;
                }
                if (errno != EDEADLK) {
                        break;
                }
                /* sleep for as short a time as we can - more portable than usleep() */
                tv.tv_sec = 0;
                tv.tv_usec = 1;
                select(0, NULL, NULL, NULL, &tv);
        }
        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                          "tdb_allrecord_upgrade failed");
}
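
#if 0
/* Example (illustrative sketch only, not built): the upgrade path assumes the
 * caller originally took the allrecord lock as an upgradable read lock (as a
 * transaction does) and later needs exclusive access to commit. */
static enum TDB_ERROR example_upgradable_allrecord(struct tdb_context *tdb)
{
        enum TDB_ERROR ecode;

        /* Upgradable read lock over the whole database. */
        ecode = tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, true);
        if (ecode != TDB_SUCCESS)
                return ecode;

        /* ... read phase ... */

        /* Upgrade to a write lock before modifying anything. */
        ecode = tdb_allrecord_upgrade(tdb);
        if (ecode != TDB_SUCCESS) {
                tdb_allrecord_unlock(tdb, F_RDLCK);
                return ecode;
        }

        /* ... write phase ... */

        /* Once upgraded, the lock is recorded as a write lock. */
        tdb_allrecord_unlock(tdb, F_WRLCK);
        return TDB_SUCCESS;
}
#endif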

static struct tdb_lock_type *find_nestlock(struct tdb_context *tdb,
                                           tdb_off_t offset)
{
        unsigned int i;

        for (i=0; i<tdb->num_lockrecs; i++) {
                if (tdb->lockrecs[i].off == offset) {
                        return &tdb->lockrecs[i];
                }
        }
        return NULL;
}

enum TDB_ERROR tdb_lock_and_recover(struct tdb_context *tdb)
{
        enum TDB_ERROR ecode;

        ecode = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK,
                                   false);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        ecode = tdb_lock_open(tdb, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK);
        if (ecode != TDB_SUCCESS) {
                tdb_allrecord_unlock(tdb, F_WRLCK);
                return ecode;
        }
        ecode = tdb_transaction_recover(tdb);
        tdb_unlock_open(tdb);
        tdb_allrecord_unlock(tdb, F_WRLCK);

        return ecode;
}

/* lock an offset in the database. */
static enum TDB_ERROR tdb_nest_lock(struct tdb_context *tdb,
                                    tdb_off_t offset, int ltype,
                                    enum tdb_lock_flags flags)
{
        struct tdb_lock_type *new_lck;
        enum TDB_ERROR ecode;

        if (offset > TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE + tdb->map_size / 8) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_lock: invalid offset %zu ltype=%d",
                                  (size_t)offset, ltype);
        }

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        add_stat(tdb, locks, 1);

        new_lck = find_nestlock(tdb, offset);
        if (new_lck) {
                if (new_lck->ltype == F_RDLCK && ltype == F_WRLCK) {
                        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                          "tdb_nest_lock:"
                                          " offset %zu has read lock",
                                          (size_t)offset);
                }
                /* Just bump the count in our struct; POSIX locks don't stack. */
                new_lck->count++;
                return TDB_SUCCESS;
        }

        if (tdb->num_lockrecs
            && offset >= TDB_HASH_LOCK_START
            && offset < TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_lock: already have a hash lock?");
        }

        new_lck = (struct tdb_lock_type *)realloc(
                tdb->lockrecs,
                sizeof(*tdb->lockrecs) * (tdb->num_lockrecs+1));
        if (new_lck == NULL) {
                return tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
                                  "tdb_nest_lock:"
                                  " unable to allocate %zu lock struct",
                                  (size_t)(tdb->num_lockrecs + 1));
        }
        tdb->lockrecs = new_lck;

        /* Since fcntl locks don't nest, we do a lock for the first one,
           and simply bump the count for future ones */
        ecode = tdb_brlock(tdb, ltype, offset, 1, flags);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        /* First time we grab a lock, perhaps someone died in commit? */
        if (!(flags & TDB_LOCK_NOCHECK)
            && tdb->num_lockrecs == 0) {
                tdb_bool_err berr = tdb_needs_recovery(tdb);
                if (berr != false) {
                        tdb_brunlock(tdb, ltype, offset, 1);

                        if (berr < 0)
                                return berr;
                        ecode = tdb_lock_and_recover(tdb);
                        if (ecode == TDB_SUCCESS) {
                                ecode = tdb_brlock(tdb, ltype, offset, 1,
                                                   flags);
                        }
                        if (ecode != TDB_SUCCESS) {
                                return ecode;
                        }
                }
        }

        tdb->lockrecs[tdb->num_lockrecs].off = offset;
        tdb->lockrecs[tdb->num_lockrecs].count = 1;
        tdb->lockrecs[tdb->num_lockrecs].ltype = ltype;
        tdb->num_lockrecs++;

        return TDB_SUCCESS;
}

static enum TDB_ERROR tdb_nest_unlock(struct tdb_context *tdb,
                                      tdb_off_t off, int ltype)
{
        struct tdb_lock_type *lck;
        enum TDB_ERROR ecode;

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        lck = find_nestlock(tdb, off);
        if ((lck == NULL) || (lck->count == 0)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_unlock: no lock for %zu",
                                  (size_t)off);
        }

        if (lck->count > 1) {
                lck->count--;
                return TDB_SUCCESS;
        }

        /*
         * This lock has count==1 left, so we need to unlock it in the
         * kernel. We don't bother with decrementing the in-memory array
         * element, we're about to overwrite it with the last array element
         * anyway.
         */
        ecode = tdb_brunlock(tdb, ltype, off, 1);

        /*
         * Shrink the array by overwriting the element just unlocked with the
         * last array element.
         */
        *lck = tdb->lockrecs[--tdb->num_lockrecs];

        return ecode;
}
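
#if 0
/* Example (illustrative sketch only, not built): nested locks on the same
 * offset only hit the kernel once; the second acquisition and the first
 * release just adjust the in-memory count kept above. */
static enum TDB_ERROR example_nested_lock(struct tdb_context *tdb,
                                          tdb_off_t off)
{
        enum TDB_ERROR ecode;

        ecode = tdb_nest_lock(tdb, off, F_RDLCK, TDB_LOCK_WAIT); /* fcntl lock taken */
        if (ecode != TDB_SUCCESS)
                return ecode;
        ecode = tdb_nest_lock(tdb, off, F_RDLCK, TDB_LOCK_WAIT); /* count becomes 2 */
        if (ecode != TDB_SUCCESS) {
                tdb_nest_unlock(tdb, off, F_RDLCK);
                return ecode;
        }

        tdb_nest_unlock(tdb, off, F_RDLCK);        /* count back to 1 */
        return tdb_nest_unlock(tdb, off, F_RDLCK); /* fcntl lock released */
}
#endif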

/*
  get the transaction lock
 */
enum TDB_ERROR tdb_transaction_lock(struct tdb_context *tdb, int ltype)
{
        return tdb_nest_lock(tdb, TDB_TRANSACTION_LOCK, ltype, TDB_LOCK_WAIT);
}

/*
  release the transaction lock
 */
void tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_TRANSACTION_LOCK, ltype);
}

/* We only need to lock individual bytes, but Linux merges consecutive locks
 * so we lock in contiguous ranges. */
static enum TDB_ERROR tdb_lock_gradual(struct tdb_context *tdb,
                                       int ltype, enum tdb_lock_flags flags,
                                       tdb_off_t off, tdb_off_t len)
{
        enum TDB_ERROR ecode;
        enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);

        if (len <= 1) {
                /* 0 would mean to end-of-file... */
                assert(len != 0);
                /* Single hash.  Just do blocking lock. */
                return tdb_brlock(tdb, ltype, off, len, flags);
        }

        /* First we try non-blocking. */
        if (tdb_brlock(tdb, ltype, off, len, nb_flags) == TDB_SUCCESS) {
                return TDB_SUCCESS;
        }

        /* Try locking first half, then second. */
        ecode = tdb_lock_gradual(tdb, ltype, flags, off, len / 2);
        if (ecode != TDB_SUCCESS)
                return ecode;

        ecode = tdb_lock_gradual(tdb, ltype, flags,
                                 off + len / 2, len - len / 2);
        if (ecode != TDB_SUCCESS) {
                tdb_brunlock(tdb, ltype, off, len / 2);
        }
        return ecode;
}
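
/* Example of the recursion above: a request for 64 bytes at off first tries
 * the whole range without blocking; if that fails it handles off..off+31 and
 * then off+32..off+63 the same way (releasing the first half if the second
 * cannot be locked), so blocking waits only ever happen on single contended
 * bytes. */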

/* lock/unlock entire database.  It can only be upgradable if you have some
 * other way of guaranteeing exclusivity (i.e. transaction write lock). */
enum TDB_ERROR tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
                                  enum tdb_lock_flags flags, bool upgradable)
{
        enum TDB_ERROR ecode;
        tdb_bool_err berr;

        /* FIXME: There are no locks on read-only dbs */
        if (tdb->read_only) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_allrecord_lock: read-only");
        }

        if (tdb->allrecord_lock.count
            && (ltype == F_RDLCK || tdb->allrecord_lock.ltype == F_WRLCK)) {
                tdb->allrecord_lock.count++;
                return TDB_SUCCESS;
        }

        if (tdb->allrecord_lock.count) {
                /* a global lock of a different type exists */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_allrecord_lock: already have %s lock",
                                  tdb->allrecord_lock.ltype == F_RDLCK
                                  ? "read" : "write");
        }

        if (tdb_has_hash_locks(tdb)) {
                /* can't combine global and chain locks */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_allrecord_lock:"
                                  " already have chain lock");
        }

        if (upgradable && ltype != F_RDLCK) {
                /* tdb error: you can't upgrade a write lock! */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_lock:"
                                  " can't upgrade a write lock");
        }

        add_stat(tdb, locks, 1);
again:
        /* Lock hashes, gradually. */
        ecode = tdb_lock_gradual(tdb, ltype, flags, TDB_HASH_LOCK_START,
                                 TDB_HASH_LOCK_RANGE);
        if (ecode != TDB_SUCCESS) {
                if (!(flags & TDB_LOCK_PROBE)) {
                        tdb_logerr(tdb, ecode, TDB_LOG_ERROR,
                                   "tdb_allrecord_lock hashes failed");
                }
                return ecode;
        }

        /* Lock free tables: from there to end of file. */
        ecode = tdb_brlock(tdb, ltype,
                           TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE,
                           0, flags);
        if (ecode != TDB_SUCCESS) {
                if (!(flags & TDB_LOCK_PROBE)) {
                        tdb_logerr(tdb, ecode, TDB_LOG_ERROR,
                                   "tdb_allrecord_lock freetables failed");
                }
                tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START,
                             TDB_HASH_LOCK_RANGE);
                return ecode;
        }

        tdb->allrecord_lock.count = 1;
        /* If it's upgradable, it's actually exclusive so we can treat
         * it as a write lock. */
        tdb->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
        tdb->allrecord_lock.off = upgradable;

        /* Now check for needing recovery. */
        if (flags & TDB_LOCK_NOCHECK)
                return TDB_SUCCESS;

        berr = tdb_needs_recovery(tdb);
        if (likely(berr == false))
                return TDB_SUCCESS;

        tdb_allrecord_unlock(tdb, ltype);
        if (berr < 0)
                return berr;
        ecode = tdb_lock_and_recover(tdb);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }
        goto again;
}

enum TDB_ERROR tdb_lock_open(struct tdb_context *tdb, enum tdb_lock_flags flags)
{
        return tdb_nest_lock(tdb, TDB_OPEN_LOCK, F_WRLCK, flags);
}

void tdb_unlock_open(struct tdb_context *tdb)
{
        tdb_nest_unlock(tdb, TDB_OPEN_LOCK, F_WRLCK);
}

bool tdb_has_open_lock(struct tdb_context *tdb)
{
        return find_nestlock(tdb, TDB_OPEN_LOCK) != NULL;
}

enum TDB_ERROR tdb_lock_expand(struct tdb_context *tdb, int ltype)
{
        /* Lock doesn't protect data, so don't check (we recurse if we do!) */
        return tdb_nest_lock(tdb, TDB_EXPANSION_LOCK, ltype,
                             TDB_LOCK_WAIT | TDB_LOCK_NOCHECK);
}

void tdb_unlock_expand(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_EXPANSION_LOCK, ltype);
}

/* unlock entire db */
void tdb_allrecord_unlock(struct tdb_context *tdb, int ltype)
{
        if (tdb->allrecord_lock.count == 0) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "tdb_allrecord_unlock: not locked!");
                return;
        }

        /* Upgradable locks are marked as write locks. */
        if (tdb->allrecord_lock.ltype != ltype
            && (!tdb->allrecord_lock.off || ltype != F_RDLCK)) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                           "tdb_allrecord_unlock: have %s lock",
                           tdb->allrecord_lock.ltype == F_RDLCK
                           ? "read" : "write");
                return;
        }

        if (tdb->allrecord_lock.count > 1) {
                tdb->allrecord_lock.count--;
                return;
        }

        tdb->allrecord_lock.count = 0;
        tdb->allrecord_lock.ltype = 0;

        tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START, 0);
}

bool tdb_has_expansion_lock(struct tdb_context *tdb)
{
        return find_nestlock(tdb, TDB_EXPANSION_LOCK) != NULL;
}

bool tdb_has_hash_locks(struct tdb_context *tdb)
{
        unsigned int i;

        for (i=0; i<tdb->num_lockrecs; i++) {
                if (tdb->lockrecs[i].off >= TDB_HASH_LOCK_START
                    && tdb->lockrecs[i].off < (TDB_HASH_LOCK_START
                                               + TDB_HASH_LOCK_RANGE))
                        return true;
        }
        return false;
}

static bool tdb_has_free_lock(struct tdb_context *tdb)
{
        unsigned int i;

        for (i=0; i<tdb->num_lockrecs; i++) {
                if (tdb->lockrecs[i].off
                    > TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE)
                        return true;
        }
        return false;
}

enum TDB_ERROR tdb_lock_hashes(struct tdb_context *tdb,
                               tdb_off_t hash_lock,
                               tdb_len_t hash_range,
                               int ltype, enum tdb_lock_flags waitflag)
{
        /* FIXME: Do this properly, using hlock_range */
        unsigned lock = TDB_HASH_LOCK_START
                + (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));
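        /* The top TDB_HASH_LOCK_RANGE_BITS bits of the 64-bit hash value
         * select the lock byte, so hashes sharing those top bits share a
         * lock (assuming TDB_HASH_LOCK_RANGE spans
         * 1 << TDB_HASH_LOCK_RANGE_BITS bytes). */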

        /* An allrecord lock allows us to avoid per-chain locks. */
        if (tdb->allrecord_lock.count &&
            (ltype == tdb->allrecord_lock.ltype || ltype == F_RDLCK)) {
                return TDB_SUCCESS;
        }

        if (tdb->allrecord_lock.count) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_lock_hashes:"
                                  " already have %s allrecordlock",
                                  tdb->allrecord_lock.ltype == F_RDLCK
                                  ? "read" : "write");
        }

        if (tdb_has_free_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_hashes: already have free lock");
        }

        if (tdb_has_expansion_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_hashes:"
                                  " already have expansion lock");
        }

        return tdb_nest_lock(tdb, lock, ltype, waitflag);
}

enum TDB_ERROR tdb_unlock_hashes(struct tdb_context *tdb,
                                 tdb_off_t hash_lock,
                                 tdb_len_t hash_range, int ltype)
{
        unsigned lock = TDB_HASH_LOCK_START
                + (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));

        /* An allrecord lock allows us to avoid per-chain locks. */
        if (tdb->allrecord_lock.count) {
                if (tdb->allrecord_lock.ltype == F_RDLCK
                    && ltype == F_WRLCK) {
                        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                          "tdb_unlock_hashes RO allrecord!");
                }
                return TDB_SUCCESS;
        }

        return tdb_nest_unlock(tdb, lock, ltype);
}

/* Hash locks use TDB_HASH_LOCK_START + the next 30 bits.
 * Then the free table locks begin; bucket offsets are sizeof(tdb_off_t)
 * apart, so we divide.
 * The result is that on 32 bit systems we don't use lock values > 2^31 on
 * files that are less than 4GB.
 */
static tdb_off_t free_lock_off(tdb_off_t b_off)
{
        return TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
                + b_off / sizeof(tdb_off_t);
}
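
/* For example, a free bucket at byte offset 4096 in the file maps to lock
 * offset TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE + 512 when tdb_off_t is
 * 8 bytes wide. */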

enum TDB_ERROR tdb_lock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off,
                                    enum tdb_lock_flags waitflag)
{
        assert(b_off >= sizeof(struct tdb_header));

        /* An allrecord lock allows us to avoid per-chain locks. */
        if (tdb->allrecord_lock.count) {
                if (tdb->allrecord_lock.ltype == F_WRLCK)
                        return TDB_SUCCESS;
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_free_bucket with"
                                  " read-only allrecordlock!");
        }

#if 0 /* FIXME */
        if (tdb_has_expansion_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_free_bucket:"
                                  " already have expansion lock");
        }
#endif

        return tdb_nest_lock(tdb, free_lock_off(b_off), F_WRLCK, waitflag);
}

void tdb_unlock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off)
{
        if (tdb->allrecord_lock.count)
                return;

        tdb_nest_unlock(tdb, free_lock_off(b_off), F_WRLCK);
}

void tdb_lock_init(struct tdb_context *tdb)
{
        tdb->num_lockrecs = 0;
        tdb->lockrecs = NULL;
        tdb->allrecord_lock.count = 0;
}