/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell              1999-2005
   Copyright (C) Paul `Rusty' Russell         2000
   Copyright (C) Jeremy Allison               2000-2003

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <assert.h>
#include <ccan/build_assert/build_assert.h>
/* If we were threaded, we could wait for unlock, but we're not, so fail. */
static enum TDB_ERROR owner_conflict(struct tdb_context *tdb, const char *call)
{
        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                          "%s: lock owned by another tdb in this process.",
                          call);
}
/* If we fork, we no longer really own locks: preserves errno */
static bool check_lock_pid(struct tdb_context *tdb,
                           const char *call, bool log)
{
        /* No locks?  No problem! */
        if (tdb->file->allrecord_lock.count == 0
            && tdb->file->num_lockrecs == 0) {
                return true;
        }

        /* No fork?  No problem! */
        if (tdb->file->locker == getpid()) {
                return true;
        }

        if (log) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "%s: fork() detected after lock acquisition!"
                           " (%u vs %u)", call, tdb->file->locker, getpid());
        }
        return false;
}
int tdb_fcntl_lock(int fd, int rw, off_t off, off_t len, bool waitflag,
                   void *unused)
{
        struct flock fl;
        int ret;

        do {
                fl.l_type = rw;
                fl.l_whence = SEEK_SET;
                fl.l_start = off;
                fl.l_len = len;

                if (waitflag)
                        ret = fcntl(fd, F_SETLKW, &fl);
                else
                        ret = fcntl(fd, F_SETLK, &fl);
        } while (ret != 0 && errno == EINTR);
        return ret;
}

int tdb_fcntl_unlock(int fd, int rw, off_t off, off_t len, void *unused)
{
        struct flock fl;
        int ret;

        do {
                fl.l_type = F_UNLCK;
                fl.l_whence = SEEK_SET;
                fl.l_start = off;
                fl.l_len = len;

                ret = fcntl(fd, F_SETLKW, &fl);
        } while (ret != 0 && errno == EINTR);
        return ret;
}
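#if 0   /* Illustrative sketch: the EINTR retry loops above exist because a
         * blocking F_SETLKW may be interrupted by any signal.  Without the
         * loop, a caller sees a spurious lock failure: */
static int naive_lock(int fd, off_t off, off_t len)
{
        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
                            .l_start = off, .l_len = len };
        /* BUG: returns -1 with errno == EINTR if e.g. SIGALRM fires
         * while we wait, even though nothing is wrong. */
        return fcntl(fd, F_SETLKW, &fl);
}
#endif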
static int lock(struct tdb_context *tdb,
                int rw, off_t off, off_t len, bool waitflag)
{
        /* First lock taken: remember which process owns the locks. */
        if (tdb->file->allrecord_lock.count == 0
            && tdb->file->num_lockrecs == 0) {
                tdb->file->locker = getpid();
        }

        add_stat(tdb, lock_lowlevel, 1);
        if (!waitflag)
                add_stat(tdb, lock_nonblock, 1);
        return tdb->lock_fn(tdb->file->fd, rw, off, len, waitflag,
                            tdb->lock_data);
}
static int unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
{
#if 0 /* Check they matched up locks and unlocks correctly. */
        char line[80];
        FILE *locks;
        bool found = false;

        locks = fopen("/proc/locks", "r");

        while (fgets(line, 80, locks)) {
                char *p;
                int type, start, l;

                /* eg. 1: POSIX  ADVISORY  WRITE 2440 08:01:2180826 0 EOF */
                p = strchr(line, ':') + 1;
                if (strncmp(p, " POSIX  ADVISORY  ",
                            strlen(" POSIX  ADVISORY  ")))
                        continue;
                p += strlen(" POSIX  ADVISORY  ");
                if (strncmp(p, "READ  ", strlen("READ  ")) == 0)
                        type = F_RDLCK;
                else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
                        type = F_WRLCK;
                else
                        abort();
                p += 6;
                if (atoi(p) != getpid())
                        continue;
                p = strchr(strchr(p, ' ') + 1, ' ') + 1;
                start = atoi(p);
                p = strchr(p, ' ') + 1;
                if (strncmp(p, "EOF", 3) == 0)
                        l = 0;
                else
                        l = atoi(p) - start + 1;

                if (off == start) {
                        if (len != l) {
                                fprintf(stderr, "Len %u should be %u: %s",
                                        (int)len, l, line);
                        } else if (type != rw) {
                                fprintf(stderr, "Type %s wrong: %s",
                                        rw == F_RDLCK ? "READ" : "WRITE", line);
                        } else {
                                found = true;
                                break;
                        }
                }
        }

        if (!found) {
                fprintf(stderr, "Unlock on %u@%u not found!",
                        (int)off, (int)len);
        }

        fclose(locks);
#endif

        return tdb->unlock_fn(tdb->file->fd, rw, off, len, tdb->lock_data);
}
/* A byte-range locking function - returns 0 on success.
   This function locks len bytes at the specified offset.

   Note that a len of zero means lock to end of file.
*/
static enum TDB_ERROR tdb_brlock(struct tdb_context *tdb,
                                 int rw_type, tdb_off_t offset, tdb_off_t len,
                                 enum tdb_lock_flags flags)
{
        int ret;

        if (tdb->flags & TDB_NOLOCK) {
                return TDB_SUCCESS;
        }

        if (rw_type == F_WRLCK && tdb->read_only) {
                return tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_LOG_USE_ERROR,
                                  "Write lock attempted on read-only database");
        }

        /* A 32 bit system cannot open a 64-bit file, but it could have
         * expanded since then: check here. */
        if ((size_t)(offset + len) != offset + len) {
                return tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
                                  "tdb_brlock: lock on giant offset %llu",
                                  (long long)(offset + len));
        }

        ret = lock(tdb, rw_type, offset, len, flags & TDB_LOCK_WAIT);
        if (ret != 0) {
                /* Generic lock error. errno set by fcntl.
                 * EAGAIN is an expected return from non-blocking
                 * locks. */
                if (!(flags & TDB_LOCK_PROBE)
                    && (errno != EAGAIN && errno != EINTR)) {
                        tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                   "tdb_brlock failed (fd=%d) at"
                                   " offset %zu rw_type=%d flags=%d len=%zu:"
                                   " %s",
                                   tdb->file->fd, (size_t)offset, rw_type,
                                   flags, (size_t)len, strerror(errno));
                }
                return TDB_ERR_LOCK;
        }
        return TDB_SUCCESS;
}
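#if 0   /* Illustrative sketch: what the (size_t)(offset + len) check above
         * catches.  On an ILP32 build (32-bit size_t) against a file that
         * has grown past 4GB: */
        tdb_off_t offset = 0x100000000ULL;      /* 4GB */
        tdb_off_t len = 1;
        /* offset + len == 0x100000001 as a 64-bit value, but truncated to
         * size_t it becomes 0x1 - we would silently lock the wrong byte,
         * so tdb_brlock() refuses with TDB_ERR_IO instead. */
#endif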
static enum TDB_ERROR tdb_brunlock(struct tdb_context *tdb,
                                   int rw_type, tdb_off_t offset, size_t len)
{
        int ret;

        if (tdb->flags & TDB_NOLOCK) {
                return TDB_SUCCESS;
        }

        ret = unlock(tdb, rw_type, offset, len);

        /* If we fail, *then* we verify that we owned the lock.  If not, ok. */
        if (ret == -1 && check_lock_pid(tdb, "tdb_brunlock", false)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_brunlock failed (fd=%d) at offset %zu"
                                  " rw_type=%d len=%zu: %s",
                                  tdb->file->fd, (size_t)offset, rw_type,
                                  (size_t)len, strerror(errno));
        }
        return TDB_SUCCESS;
}
/*
  Upgrade a read lock to a write lock. This needs to be handled in a
  special way as some OSes (such as Solaris) have too conservative
  deadlock detection and claim a deadlock when progress can be
  made. For those OSes we may loop for a while.
*/
enum TDB_ERROR tdb_allrecord_upgrade(struct tdb_context *tdb)
{
        int count = 1000;

        if (!check_lock_pid(tdb, "tdb_allrecord_upgrade", true))
                return TDB_ERR_LOCK;

        if (tdb->file->allrecord_lock.count != 1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_upgrade failed:"
                                  " count %u too high",
                                  tdb->file->allrecord_lock.count);
        }

        if (tdb->file->allrecord_lock.off != 1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_upgrade failed:"
                                  " already upgraded?");
        }

        if (tdb->file->allrecord_lock.owner != tdb) {
                return owner_conflict(tdb, "tdb_allrecord_upgrade");
        }

        while (count--) {
                struct timeval tv;
                if (tdb_brlock(tdb, F_WRLCK,
                               TDB_HASH_LOCK_START, 0,
                               TDB_LOCK_WAIT|TDB_LOCK_PROBE) == TDB_SUCCESS) {
                        tdb->file->allrecord_lock.ltype = F_WRLCK;
                        tdb->file->allrecord_lock.off = 0;
                        return TDB_SUCCESS;
                }
                if (errno != EDEADLK) {
                        break;
                }
                /* sleep for as short a time as we can - more portable than usleep() */
                tv.tv_sec = 0;
                tv.tv_usec = 1;
                select(0, NULL, NULL, NULL, &tv);
        }

        if (errno != EAGAIN && errno != EINTR)
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                           "tdb_allrecord_upgrade failed");
        return TDB_ERR_LOCK;
}
static struct tdb_lock *find_nestlock(struct tdb_context *tdb, tdb_off_t offset,
                                      const struct tdb_context *owner)
{
        unsigned int i;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off == offset) {
                        if (owner && tdb->file->lockrecs[i].owner != owner)
                                return NULL;
                        return &tdb->file->lockrecs[i];
                }
        }
        return NULL;
}
enum TDB_ERROR tdb_lock_and_recover(struct tdb_context *tdb)
{
        enum TDB_ERROR ecode;

        if (!check_lock_pid(tdb, "tdb_lock_and_recover", true))
                return TDB_ERR_LOCK;

        ecode = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK,
                                   false);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        ecode = tdb_lock_open(tdb, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK);
        if (ecode != TDB_SUCCESS) {
                tdb_allrecord_unlock(tdb, F_WRLCK);
                return ecode;
        }
        ecode = tdb_transaction_recover(tdb);
        tdb_unlock_open(tdb);
        tdb_allrecord_unlock(tdb, F_WRLCK);

        return ecode;
}
/* Lock an offset in the database. */
static enum TDB_ERROR tdb_nest_lock(struct tdb_context *tdb,
                                    tdb_off_t offset, int ltype,
                                    enum tdb_lock_flags flags)
{
        struct tdb_lock *new_lck;
        enum TDB_ERROR ecode;

        if (offset > (TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
                      + tdb->file->map_size / 8)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_lock: invalid offset %zu ltype=%d",
                                  (size_t)offset, ltype);
        }

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        if (!check_lock_pid(tdb, "tdb_nest_lock", true)) {
                return TDB_ERR_LOCK;
        }

        add_stat(tdb, locks, 1);

        new_lck = find_nestlock(tdb, offset, NULL);
        if (new_lck) {
                if (new_lck->owner != tdb) {
                        return owner_conflict(tdb, "tdb_nest_lock");
                }

                if (new_lck->ltype == F_RDLCK && ltype == F_WRLCK) {
                        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                          "tdb_nest_lock:"
                                          " offset %zu has read lock",
                                          (size_t)offset);
                }
                /* Just increment the struct, posix locks don't stack. */
                new_lck->count++;
                return TDB_SUCCESS;
        }

        if (tdb->file->num_lockrecs
            && offset >= TDB_HASH_LOCK_START
            && offset < TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_lock: already have a hash lock?");
        }

        new_lck = (struct tdb_lock *)realloc(
                tdb->file->lockrecs,
                sizeof(*tdb->file->lockrecs) * (tdb->file->num_lockrecs+1));
        if (new_lck == NULL) {
                return tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
                                  "tdb_nest_lock:"
                                  " unable to allocate %zu lock struct",
                                  tdb->file->num_lockrecs + 1);
        }
        tdb->file->lockrecs = new_lck;

        /* Since fcntl locks don't nest, we do a lock for the first one,
           and simply bump the count for future ones */
        ecode = tdb_brlock(tdb, ltype, offset, 1, flags);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        /* First time we grab a lock, perhaps someone died in commit? */
        if (!(flags & TDB_LOCK_NOCHECK)
            && tdb->file->num_lockrecs == 0) {
                tdb_bool_err berr = tdb_needs_recovery(tdb);
                if (berr != false) {
                        tdb_brunlock(tdb, ltype, offset, 1);

                        if (berr < 0)
                                return berr;
                        ecode = tdb_lock_and_recover(tdb);
                        if (ecode == TDB_SUCCESS) {
                                ecode = tdb_brlock(tdb, ltype, offset, 1,
                                                   flags);
                        }
                        if (ecode != TDB_SUCCESS) {
                                return ecode;
                        }
                }
        }

        tdb->file->lockrecs[tdb->file->num_lockrecs].owner = tdb;
        tdb->file->lockrecs[tdb->file->num_lockrecs].off = offset;
        tdb->file->lockrecs[tdb->file->num_lockrecs].count = 1;
        tdb->file->lockrecs[tdb->file->num_lockrecs].ltype = ltype;
        tdb->file->num_lockrecs++;

        return TDB_SUCCESS;
}
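#if 0   /* Illustrative sketch: how counted nesting behaves.  Two nested
         * locks of one offset cost a single fcntl() call; the inner
         * unlock only drops the count (assumes a function scope with a
         * valid struct tdb_context *tdb and tdb_off_t off): */
        tdb_nest_lock(tdb, off, F_RDLCK, TDB_LOCK_WAIT); /* fcntl, count=1 */
        tdb_nest_lock(tdb, off, F_RDLCK, TDB_LOCK_WAIT); /* count=2, no fcntl */
        tdb_nest_unlock(tdb, off, F_RDLCK);              /* count=1, no fcntl */
        tdb_nest_unlock(tdb, off, F_RDLCK);              /* fcntl unlock */
#endif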
static enum TDB_ERROR tdb_nest_unlock(struct tdb_context *tdb,
                                      tdb_off_t off, int ltype)
{
        struct tdb_lock *lck;
        enum TDB_ERROR ecode;

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        lck = find_nestlock(tdb, off, tdb);
        if ((lck == NULL) || (lck->count == 0)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_unlock: no lock for %zu",
                                  (size_t)off);
        }

        if (lck->count > 1) {
                lck->count--;
                return TDB_SUCCESS;
        }

        /*
         * This lock has count==1 left, so we need to unlock it in the
         * kernel. We don't bother with decrementing the in-memory array
         * element, we're about to overwrite it with the last array element
         * anyway.
         */
        ecode = tdb_brunlock(tdb, ltype, off, 1);

        /*
         * Shrink the array by overwriting the element just unlocked with the
         * last array element.
         */
        *lck = tdb->file->lockrecs[--tdb->file->num_lockrecs];

        return ecode;
}
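#if 0   /* Illustrative sketch: the shrink above is the classic O(1)
         * "swap with last" removal for unordered arrays - no memmove(),
         * at the cost of element order: */
static void remove_at(struct tdb_lock *arr, unsigned int *n, unsigned int i)
{
        arr[i] = arr[--(*n)];   /* copy last element over slot i */
}
#endif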
/*
  Get the transaction lock.
 */
enum TDB_ERROR tdb_transaction_lock(struct tdb_context *tdb, int ltype)
{
        return tdb_nest_lock(tdb, TDB_TRANSACTION_LOCK, ltype, TDB_LOCK_WAIT);
}

/*
  Release the transaction lock.
 */
void tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_TRANSACTION_LOCK, ltype);
}
/* We only need to lock individual bytes, but Linux merges consecutive locks
 * so we lock in contiguous ranges. */
static enum TDB_ERROR tdb_lock_gradual(struct tdb_context *tdb,
                                       int ltype, enum tdb_lock_flags flags,
                                       tdb_off_t off, tdb_off_t len)
{
        enum TDB_ERROR ecode;
        enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);

        if (len <= 1) {
                /* 0 would mean to end-of-file... */
                assert(len != 0);
                /* Single hash.  Just do blocking lock. */
                return tdb_brlock(tdb, ltype, off, len, flags);
        }

        /* First we try non-blocking. */
        if (tdb_brlock(tdb, ltype, off, len, nb_flags) == TDB_SUCCESS) {
                return TDB_SUCCESS;
        }

        /* Try locking first half, then second. */
        ecode = tdb_lock_gradual(tdb, ltype, flags, off, len / 2);
        if (ecode != TDB_SUCCESS)
                return ecode;

        ecode = tdb_lock_gradual(tdb, ltype, flags,
                                 off + len / 2, len - len / 2);
        if (ecode != TDB_SUCCESS) {
                tdb_brunlock(tdb, ltype, off, len / 2);
        }
        return ecode;
}
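#if 0   /* Illustrative trace: locking bytes [0,8) while another process
         * holds byte 5.  The recursion grabs what it can without waiting
         * and only blocks on single contended bytes:
         *
         *   [0,8) non-blocking        -> fails (byte 5 held)
         *     [0,4) non-blocking      -> succeeds
         *     [4,8) non-blocking      -> fails
         *       [4,6) non-blocking    -> fails
         *         [4,5) blocking      -> succeeds at once
         *         [5,6) blocking      -> waits for the holder
         *       [6,8) non-blocking    -> succeeds
         *
         * On Linux the adjacent byte locks merge back into one range. */
#endif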
/* Lock/unlock entire database.  It can only be upgradable if you have some
 * other way of guaranteeing exclusivity (ie. transaction write lock). */
enum TDB_ERROR tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
                                  enum tdb_lock_flags flags, bool upgradable)
{
        enum TDB_ERROR ecode;
        tdb_bool_err berr;

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        if (!check_lock_pid(tdb, "tdb_allrecord_lock", true)) {
                return TDB_ERR_LOCK;
        }

        if (tdb->file->allrecord_lock.count) {
                if (tdb->file->allrecord_lock.owner != tdb) {
                        return owner_conflict(tdb, "tdb_allrecord_lock");
                }

                if (ltype == F_RDLCK
                    || tdb->file->allrecord_lock.ltype == F_WRLCK) {
                        tdb->file->allrecord_lock.count++;
                        return TDB_SUCCESS;
                }

                /* a global lock of a different type exists */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_allrecord_lock: already have %s lock",
                                  tdb->file->allrecord_lock.ltype == F_RDLCK
                                  ? "read" : "write");
        }

        if (tdb_has_hash_locks(tdb)) {
                /* can't combine global and chain locks */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_allrecord_lock:"
                                  " already have chain lock");
        }

        if (upgradable && ltype != F_RDLCK) {
                /* tdb error: you can't upgrade a write lock! */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_lock:"
                                  " can't upgrade a write lock");
        }

        add_stat(tdb, locks, 1);
again:
        /* Lock hashes, gradually. */
        ecode = tdb_lock_gradual(tdb, ltype, flags, TDB_HASH_LOCK_START,
                                 TDB_HASH_LOCK_RANGE);
        if (ecode != TDB_SUCCESS)
                return ecode;

        /* Lock free tables: there to end of file. */
        ecode = tdb_brlock(tdb, ltype,
                           TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE,
                           0, flags);
        if (ecode != TDB_SUCCESS) {
                tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START,
                             TDB_HASH_LOCK_RANGE);
                return ecode;
        }

        tdb->file->allrecord_lock.owner = tdb;
        tdb->file->allrecord_lock.count = 1;
        /* If it's upgradable, it's actually exclusive so we can treat
         * it as a write lock. */
        tdb->file->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
        tdb->file->allrecord_lock.off = upgradable;

        /* Now check for needing recovery. */
        if (flags & TDB_LOCK_NOCHECK)
                return TDB_SUCCESS;

        berr = tdb_needs_recovery(tdb);
        if (likely(berr == false))
                return TDB_SUCCESS;

        tdb_allrecord_unlock(tdb, ltype);
        if (berr < 0)
                return berr;
        ecode = tdb_lock_and_recover(tdb);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }
        goto again;
}
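#if 0   /* Illustrative sketch: the allrecord lock is really two fcntl
         * ranges - the hash-lock range taken gradually, then one lock
         * from the end of that range to EOF (len 0) covering free tables
         * and records.  Typical caller pattern (tdb in scope): */
        if (tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false)
            == TDB_SUCCESS) {
                /* ... consistent whole-database read ... */
                tdb_allrecord_unlock(tdb, F_RDLCK);
        }
#endif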
enum TDB_ERROR tdb_lock_open(struct tdb_context *tdb, enum tdb_lock_flags flags)
{
        return tdb_nest_lock(tdb, TDB_OPEN_LOCK, F_WRLCK, flags);
}

void tdb_unlock_open(struct tdb_context *tdb)
{
        tdb_nest_unlock(tdb, TDB_OPEN_LOCK, F_WRLCK);
}

bool tdb_has_open_lock(struct tdb_context *tdb)
{
        return !(tdb->flags & TDB_NOLOCK)
                && find_nestlock(tdb, TDB_OPEN_LOCK, tdb) != NULL;
}

enum TDB_ERROR tdb_lock_expand(struct tdb_context *tdb, int ltype)
{
        /* Lock doesn't protect data, so don't check (we recurse if we do!) */
        return tdb_nest_lock(tdb, TDB_EXPANSION_LOCK, ltype,
                             TDB_LOCK_WAIT | TDB_LOCK_NOCHECK);
}

void tdb_unlock_expand(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_EXPANSION_LOCK, ltype);
}
/* Unlock entire db. */
void tdb_allrecord_unlock(struct tdb_context *tdb, int ltype)
{
        if (tdb->flags & TDB_NOLOCK)
                return;

        if (tdb->file->allrecord_lock.count == 0) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "tdb_allrecord_unlock: not locked!");
                return;
        }

        if (tdb->file->allrecord_lock.owner != tdb) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "tdb_allrecord_unlock: not locked by us!");
                return;
        }

        /* Upgradable locks are marked as write locks. */
        if (tdb->file->allrecord_lock.ltype != ltype
            && (!tdb->file->allrecord_lock.off || ltype != F_RDLCK)) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                           "tdb_allrecord_unlock: have %s lock",
                           tdb->file->allrecord_lock.ltype == F_RDLCK
                           ? "read" : "write");
                return;
        }

        if (tdb->file->allrecord_lock.count > 1) {
                tdb->file->allrecord_lock.count--;
                return;
        }

        tdb->file->allrecord_lock.count = 0;
        tdb->file->allrecord_lock.ltype = 0;

        tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START, 0);
}
bool tdb_has_expansion_lock(struct tdb_context *tdb)
{
        return find_nestlock(tdb, TDB_EXPANSION_LOCK, tdb) != NULL;
}

bool tdb_has_hash_locks(struct tdb_context *tdb)
{
        unsigned int i;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off >= TDB_HASH_LOCK_START
                    && tdb->file->lockrecs[i].off < (TDB_HASH_LOCK_START
                                                     + TDB_HASH_LOCK_RANGE))
                        return true;
        }
        return false;
}

static bool tdb_has_free_lock(struct tdb_context *tdb)
{
        unsigned int i;

        if (tdb->flags & TDB_NOLOCK)
                return false;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off
                    > TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE)
                        return true;
        }
        return false;
}
enum TDB_ERROR tdb_lock_hashes(struct tdb_context *tdb,
                               tdb_off_t hash_lock,
                               tdb_len_t hash_range,
                               int ltype, enum tdb_lock_flags waitflag)
{
        /* FIXME: Do this properly, using hlock_range */
        unsigned l = TDB_HASH_LOCK_START
                + (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));

        /* An allrecord lock allows us to avoid per chain locks. */
        if (tdb->file->allrecord_lock.count) {
                if (!check_lock_pid(tdb, "tdb_lock_hashes", true))
                        return TDB_ERR_LOCK;

                if (tdb->file->allrecord_lock.owner != tdb)
                        return owner_conflict(tdb, "tdb_lock_hashes");
                if (ltype == tdb->file->allrecord_lock.ltype
                    || ltype == F_RDLCK) {
                        return TDB_SUCCESS;
                }

                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_lock_hashes:"
                                  " already have %s allrecordlock",
                                  tdb->file->allrecord_lock.ltype == F_RDLCK
                                  ? "read" : "write");
        }

        if (tdb_has_free_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_hashes: already have free lock");
        }

        if (tdb_has_expansion_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_hashes:"
                                  " already have expansion lock");
        }

        return tdb_nest_lock(tdb, l, ltype, waitflag);
}
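#if 0   /* Illustrative sketch: the top TDB_HASH_LOCK_RANGE_BITS bits of
         * the 64-bit hash pick the lock byte, so the whole hash space
         * folds evenly onto the hash-lock range.  Assuming 30 range bits,
         * a hash of 0xC000000000000000 shifts right by 34 and maps to
         * lock offset TDB_HASH_LOCK_START + 0x30000000. */
#endif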
enum TDB_ERROR tdb_unlock_hashes(struct tdb_context *tdb,
                                 tdb_off_t hash_lock,
                                 tdb_len_t hash_range, int ltype)
{
        unsigned l = TDB_HASH_LOCK_START
                + (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        /* An allrecord lock allows us to avoid per chain locks. */
        if (tdb->file->allrecord_lock.count) {
                if (tdb->file->allrecord_lock.ltype == F_RDLCK
                    && ltype == F_WRLCK) {
                        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                          "tdb_unlock_hashes RO allrecord!");
                }
                return TDB_SUCCESS;
        }

        return tdb_nest_unlock(tdb, l, ltype);
}
/* Hash locks use TDB_HASH_LOCK_START + the next 30 bits.
 * Then we begin; bucket offsets are sizeof(tdb_len_t) apart, so we divide.
 * The result is that on 32 bit systems we don't use lock values > 2^31 on
 * files that are less than 4GB.
 */
static tdb_off_t free_lock_off(tdb_off_t b_off)
{
        return TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
                + b_off / sizeof(tdb_off_t);
}
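#if 0   /* Illustrative sketch: free-list buckets sit at file offsets that
         * are multiples of sizeof(tdb_off_t) (assumed 8 bytes here), so
         * dividing keeps the lock offsets dense: buckets at offsets 64,
         * 72, 80 map to TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE + 8,
         * + 9, + 10 respectively. */
#endif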
enum TDB_ERROR tdb_lock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off,
                                    enum tdb_lock_flags waitflag)
{
        assert(b_off >= sizeof(struct tdb_header));

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        /* An allrecord lock allows us to avoid per chain locks. */
        if (tdb->file->allrecord_lock.count) {
                if (!check_lock_pid(tdb, "tdb_lock_free_bucket", true))
                        return TDB_ERR_LOCK;

                if (tdb->file->allrecord_lock.ltype == F_WRLCK)
                        return TDB_SUCCESS;
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_free_bucket with"
                                  " read-only allrecordlock!");
        }

        if (tdb_has_expansion_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_free_bucket:"
                                  " already have expansion lock");
        }

        return tdb_nest_lock(tdb, free_lock_off(b_off), F_WRLCK, waitflag);
}
void tdb_unlock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off)
{
        if (tdb->file->allrecord_lock.count)
                return;

        tdb_nest_unlock(tdb, free_lock_off(b_off), F_WRLCK);
}
enum TDB_ERROR tdb_lockall(struct tdb_context *tdb)
{
        return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
}

void tdb_unlockall(struct tdb_context *tdb)
{
        tdb_allrecord_unlock(tdb, F_WRLCK);
}

enum TDB_ERROR tdb_lockall_read(struct tdb_context *tdb)
{
        return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
}

void tdb_unlockall_read(struct tdb_context *tdb)
{
        tdb_allrecord_unlock(tdb, F_RDLCK);
}
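#if 0   /* Illustrative sketch: the public pairings above, e.g. making a
         * group of updates appear atomic to other lockers (tdb in scope;
         * the store/delete calls are elided): */
        if (tdb_lockall(tdb) == TDB_SUCCESS) {
                /* ... several stores/deletes, seen as one unit ... */
                tdb_unlockall(tdb);
        }
#endif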
void tdb_lock_cleanup(struct tdb_context *tdb)
{
        unsigned int i;

        while (tdb->file->allrecord_lock.count
               && tdb->file->allrecord_lock.owner == tdb) {
                tdb_allrecord_unlock(tdb, tdb->file->allrecord_lock.ltype);
        }

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].owner == tdb) {
                        tdb_nest_unlock(tdb,
                                        tdb->file->lockrecs[i].off,
                                        tdb->file->lockrecs[i].ltype);
                        /* tdb_nest_unlock() may have swapped the last
                         * record into slot i; revisit it. */
                        i--;
                }
        }
}