/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell              1999-2005
   Copyright (C) Paul `Rusty' Russell          2000
   Copyright (C) Jeremy Allison                2000-2003

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "tdb_private.h"
void tdb_setalarm_sigptr(struct tdb_context *tdb, volatile sig_atomic_t *ptr)
{
	tdb->interrupt_sig_ptr = ptr;
}

static int fcntl_lock(struct tdb_context *tdb,
		      int rw, off_t off, off_t len, bool waitflag)
{
	struct flock fl;

	fl.l_type = rw;
	fl.l_whence = SEEK_SET;
	fl.l_start = off;
	fl.l_len = len;
	fl.l_pid = 0;

	if (waitflag)
		return fcntl(tdb->fd, F_SETLKW, &fl);
	else
		return fcntl(tdb->fd, F_SETLK, &fl);
}
static int fcntl_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
{
	struct flock fl;
#if 0 /* Check they matched up locks and unlocks correctly. */
	char line[80];
	FILE *locks;
	bool found = false;

	locks = fopen("/proc/locks", "r");

	while (fgets(line, 80, locks)) {
		char *p;
		int type, start, l;

		/* eg. 1: FLOCK  ADVISORY  WRITE 2440 08:01:2180826 0 EOF */
		p = strchr(line, ':') + 1;
		if (strncmp(p, " POSIX  ADVISORY  ",
			    strlen(" POSIX  ADVISORY  ")) != 0)
			continue;

		p += strlen(" FLOCK  ADVISORY  ");
		if (strncmp(p, "READ  ", strlen("READ  ")) == 0)
			type = F_RDLCK;
		else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
			type = F_WRLCK;
		else
			abort();

		p += 6;
		if (atoi(p) != getpid())
			continue;

		p = strchr(strchr(p, ' ') + 1, ' ') + 1;
		start = atoi(p);

		p = strchr(p, ' ') + 1;
		if (strncmp(p, "EOF", 3) == 0)
			l = 0;
		else
			l = atoi(p) - start + 1;

		if (off == start) {
			if (len != l) {
				fprintf(stderr, "Len %u should be %u: %s",
					(int)len, l, line);
			} else if (type != rw) {
				fprintf(stderr, "Type %s wrong: %s",
					rw == F_RDLCK ? "READ" : "WRITE", line);
			} else {
				found = true;
				break;
			}
		}
	}

	if (!found) {
		fprintf(stderr, "Unlock on %u@%u not found!\n",
			(int)off, (int)len);
	}

	fclose(locks);
#endif

	fl.l_type = F_UNLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = off;
	fl.l_len = len;
	fl.l_pid = 0;

	return fcntl(tdb->fd, F_SETLKW, &fl);
}
/* list -1 is the alloc list, otherwise a hash chain. */
static tdb_off_t lock_offset(int list)
{
	return FREELIST_TOP + 4*list;
}
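/*
 * For illustration: lock_offset(-1) is the byte used to serialize the
 * allocation (free) list, and lock_offset(n) for n >= 0 is the byte used
 * to serialize hash chain n, e.g.
 *
 *   lock_offset(-1) == FREELIST_TOP - 4
 *   lock_offset(0)  == FREELIST_TOP
 *   lock_offset(7)  == FREELIST_TOP + 28
 *
 * These are advisory fcntl lock positions only; they do not have to
 * coincide with the on-disk location of the data they protect.
 */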
/* a byte range locking function - return 0 on success
   this function locks/unlocks 1 byte at the specified offset.

   On error, errno is also set so that errors are passed back properly
   through tdb_open().

   note that a len of zero means lock to end of file
*/
int tdb_brlock(struct tdb_context *tdb,
	       int rw_type, tdb_off_t offset, size_t len,
	       enum tdb_lock_flags flags)
{
	int ret;

	if (tdb->flags & TDB_NOLOCK) {
		return 0;
	}

	if (flags & TDB_LOCK_MARK_ONLY) {
		return 0;
	}

	if ((rw_type == F_WRLCK) && (tdb->read_only || tdb->traverse_read)) {
		tdb->ecode = TDB_ERR_RDONLY;
		return -1;
	}

	do {
		ret = fcntl_lock(tdb, rw_type, offset, len,
				 flags & TDB_LOCK_WAIT);
		/* Check for a sigalarm break. */
		if (ret == -1 && errno == EINTR &&
		    tdb->interrupt_sig_ptr &&
		    *tdb->interrupt_sig_ptr) {
			break;
		}
	} while (ret == -1 && errno == EINTR);

	if (ret == -1) {
		tdb->ecode = TDB_ERR_LOCK;
		/* Generic lock error. errno set by fcntl.
		 * EAGAIN is an expected return from non-blocking
		 * locks. */
		if (!(flags & TDB_LOCK_PROBE) && errno != EAGAIN) {
			TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brlock failed (fd=%d) at offset %d rw_type=%d flags=%d len=%d\n",
				 tdb->fd, offset, rw_type, flags, (int)len));
		}
		return -1;
	}
	return 0;
}
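/*
 * For example, a blocking read lock on a single hash-chain byte,
 *
 *   tdb_brlock(tdb, F_RDLCK, lock_offset(3), 1, TDB_LOCK_WAIT);
 *
 * ends up as an fcntl(fd, F_SETLKW, ...) read lock of one byte at that
 * offset, retried while it is interrupted by signals (unless the
 * registered interrupt_sig_ptr says to give up).
 */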
int tdb_brunlock(struct tdb_context *tdb,
		 int rw_type, tdb_off_t offset, size_t len)
{
	int ret;

	if (tdb->flags & TDB_NOLOCK) {
		return 0;
	}

	do {
		ret = fcntl_unlock(tdb, rw_type, offset, len);
	} while (ret == -1 && errno == EINTR);

	if (ret == -1) {
		TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brunlock failed (fd=%d) at offset %d rw_type=%d len=%d\n",
			 tdb->fd, offset, rw_type, (int)len));
	}
	return ret;
}
/*
  upgrade a read lock to a write lock. This needs to be handled in a
  special way as some OSes (such as Solaris) have too conservative
  deadlock detection and claim a deadlock when progress can be
  made. For those OSes we may loop for a while.
*/
int tdb_allrecord_upgrade(struct tdb_context *tdb)
{
	int count = 1000;

	if (tdb->allrecord_lock.count != 1) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR,
			 "tdb_allrecord_upgrade failed: count %u too high\n",
			 tdb->allrecord_lock.count));
		return -1;
	}

	if (tdb->allrecord_lock.off != 1) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR,
			 "tdb_allrecord_upgrade failed: already upgraded?\n"));
		return -1;
	}

	while (count--) {
		struct timeval tv;

		if (tdb_brlock(tdb, F_WRLCK, FREELIST_TOP, 0,
			       TDB_LOCK_WAIT|TDB_LOCK_PROBE) == 0) {
			tdb->allrecord_lock.ltype = F_WRLCK;
			tdb->allrecord_lock.off = 0;
			return 0;
		}
		if (errno != EDEADLK) {
			break;
		}
		/* sleep for as short a time as we can - more portable than usleep() */
		tv.tv_sec = 0;
		tv.tv_usec = 1;
		select(0, NULL, NULL, NULL, &tv);
	}
	TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_allrecord_upgrade failed\n"));
	return -1;
}
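/*
 * Note on tdb_allrecord_upgrade() above: allrecord_lock.off doubles as
 * the "upgradable" flag.  tdb_allrecord_lock() sets it to 1 when the
 * lock was taken upgradable, and the upgrade clears it to 0 once the
 * F_WRLCK is in place, which is why off != 1 is reported as
 * "already upgraded?".
 */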
static struct tdb_lock_type *find_nestlock(struct tdb_context *tdb,
					   tdb_off_t offset)
{
	unsigned int i;

	for (i=0; i<tdb->num_lockrecs; i++) {
		if (tdb->lockrecs[i].off == offset) {
			return &tdb->lockrecs[i];
		}
	}
	return NULL;
}
/* lock an offset in the database. */
int tdb_nest_lock(struct tdb_context *tdb, uint32_t offset, int ltype,
		  enum tdb_lock_flags flags)
{
	struct tdb_lock_type *new_lck;

	if (offset >= lock_offset(tdb->header.hash_size)) {
		tdb->ecode = TDB_ERR_LOCK;
		TDB_LOG((tdb, TDB_DEBUG_ERROR,"tdb_lock: invalid offset %u for ltype=%d\n",
			 offset, ltype));
		return -1;
	}
	if (tdb->flags & TDB_NOLOCK)
		return 0;

	new_lck = find_nestlock(tdb, offset);
	if (new_lck) {
		/*
		 * Just increment the in-memory struct, posix locks
		 * don't stack.
		 */
		new_lck->count++;
		return 0;
	}

	new_lck = (struct tdb_lock_type *)realloc(
		tdb->lockrecs,
		sizeof(*tdb->lockrecs) * (tdb->num_lockrecs+1));
	if (new_lck == NULL) {
		errno = ENOMEM;
		return -1;
	}
	tdb->lockrecs = new_lck;

	/* Since fcntl locks don't nest, we do a lock for the first one,
	   and simply bump the count for future ones */
	if (tdb_brlock(tdb, ltype, offset, 1, flags)) {
		return -1;
	}

	tdb->lockrecs[tdb->num_lockrecs].off = offset;
	tdb->lockrecs[tdb->num_lockrecs].count = 1;
	tdb->lockrecs[tdb->num_lockrecs].ltype = ltype;
	tdb->num_lockrecs++;

	return 0;
}
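/*
 * Usage sketch for tdb_nest_lock()/tdb_nest_unlock() (illustrative
 * only): the lock records nest in memory while the underlying fcntl
 * lock is taken once, so a caller that locks the same offset twice
 * must also unlock it twice:
 *
 *   tdb_nest_lock(tdb, lock_offset(0), F_WRLCK, TDB_LOCK_WAIT);
 *   tdb_nest_lock(tdb, lock_offset(0), F_WRLCK, TDB_LOCK_WAIT);  (count 2)
 *   tdb_nest_unlock(tdb, lock_offset(0), F_WRLCK, false);        (count 1)
 *   tdb_nest_unlock(tdb, lock_offset(0), F_WRLCK, false);        (fcntl unlock)
 */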
static int tdb_lock_and_recover(struct tdb_context *tdb)
{
	int ret;

	/* We need to match locking order in transaction commit. */
	if (tdb_brlock(tdb, F_WRLCK, FREELIST_TOP, 0, TDB_LOCK_WAIT)) {
		return -1;
	}

	if (tdb_brlock(tdb, F_WRLCK, OPEN_LOCK, 1, TDB_LOCK_WAIT)) {
		tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);
		return -1;
	}

	ret = tdb_transaction_recover(tdb);

	tdb_brunlock(tdb, F_WRLCK, OPEN_LOCK, 1);
	tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);

	return ret;
}
static bool have_data_locks(const struct tdb_context *tdb)
{
	unsigned int i;

	for (i = 0; i < tdb->num_lockrecs; i++) {
		if (tdb->lockrecs[i].off >= lock_offset(-1))
			return true;
	}
	return false;
}
static int tdb_lock_list(struct tdb_context *tdb, int list, int ltype,
			 enum tdb_lock_flags waitflag)
{
	int ret;
	bool check = false;

	/* an allrecord lock allows us to avoid per-chain locks */
	if (tdb->allrecord_lock.count &&
	    (ltype == tdb->allrecord_lock.ltype || ltype == F_RDLCK)) {
		return 0;
	}

	if (tdb->allrecord_lock.count) {
		tdb->ecode = TDB_ERR_LOCK;
		ret = -1;
	} else {
		/* Only check when we grab first data lock. */
		check = !have_data_locks(tdb);
		ret = tdb_nest_lock(tdb, lock_offset(list), ltype, waitflag);

		if (ret == 0 && check && tdb_needs_recovery(tdb)) {
			tdb_nest_unlock(tdb, lock_offset(list), ltype, false);

			if (tdb_lock_and_recover(tdb) == -1) {
				return -1;
			}
			return tdb_lock_list(tdb, list, ltype, waitflag);
		}
	}
	return ret;
}
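/*
 * Note on tdb_lock_list() above: the recovery check only fires on the
 * first data lock this process takes.  If the database still carries an
 * unfinished transaction recovery area, the freshly acquired chain lock
 * is dropped, recovery runs under the proper locks, and the chain lock
 * is then retried.
 */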
/* lock a list in the database. list -1 is the alloc list */
int tdb_lock(struct tdb_context *tdb, int list, int ltype)
{
	int ret;

	ret = tdb_lock_list(tdb, list, ltype, TDB_LOCK_WAIT);
	if (ret) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lock failed on list %d "
			 "ltype=%d (%s)\n", list, ltype, strerror(errno)));
	}
	return ret;
}
/* lock a list in the database. list -1 is the alloc list. non-blocking lock */
int tdb_lock_nonblock(struct tdb_context *tdb, int list, int ltype)
{
	return tdb_lock_list(tdb, list, ltype, TDB_LOCK_NOWAIT);
}
int tdb_nest_unlock(struct tdb_context *tdb, uint32_t offset, int ltype,
		    bool mark_lock)
{
	int ret = -1;
	struct tdb_lock_type *lck;

	if (tdb->flags & TDB_NOLOCK)
		return 0;

	/* Sanity checks */
	if (offset >= lock_offset(tdb->header.hash_size)) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: offset %u invalid (%d)\n", offset, tdb->header.hash_size));
		return ret;
	}

	lck = find_nestlock(tdb, offset);
	if ((lck == NULL) || (lck->count == 0)) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: count is 0\n"));
		return -1;
	}

	if (lck->count > 1) {
		lck->count--;
		return 0;
	}

	/*
	 * This lock has count==1 left, so we need to unlock it in the
	 * kernel. We don't bother with decrementing the in-memory array
	 * element, we're about to overwrite it with the last array element
	 * anyway.
	 */

	if (mark_lock) {
		ret = 0;
	} else {
		ret = tdb_brunlock(tdb, ltype, offset, 1);
	}

	/*
	 * Shrink the array by overwriting the element just unlocked with the
	 * last array element.
	 */
	*lck = tdb->lockrecs[--tdb->num_lockrecs];

	/*
	 * We don't bother with realloc when the array shrinks, but if we have
	 * a completely idle tdb we should get rid of the locked array.
	 */
	if (tdb->num_lockrecs == 0) {
		SAFE_FREE(tdb->lockrecs);
	}

	if (ret)
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: An error occurred unlocking!\n"));
	return ret;
}
int tdb_unlock(struct tdb_context *tdb, int list, int ltype)
{
	/* a global lock allows us to avoid per-chain locks */
	if (tdb->allrecord_lock.count &&
	    (ltype == tdb->allrecord_lock.ltype || ltype == F_RDLCK)) {
		return 0;
	}

	if (tdb->allrecord_lock.count) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	return tdb_nest_unlock(tdb, lock_offset(list), ltype, false);
}
/*
  get the transaction lock
 */
int tdb_transaction_lock(struct tdb_context *tdb, int ltype)
{
	return tdb_nest_lock(tdb, TRANSACTION_LOCK, ltype, TDB_LOCK_WAIT);
}

/*
  release the transaction lock
 */
int tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
{
	return tdb_nest_unlock(tdb, TRANSACTION_LOCK, ltype, false);
}
/* Returns 0 if all done, -1 if error, 1 if ok. */
static int tdb_allrecord_check(struct tdb_context *tdb, int ltype,
			       enum tdb_lock_flags flags, bool upgradable)
{
	/* There are no locks on read-only dbs */
	if (tdb->read_only || tdb->traverse_read) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb->allrecord_lock.count && tdb->allrecord_lock.ltype == ltype) {
		tdb->allrecord_lock.count++;
		return 0;
	}

	if (tdb->allrecord_lock.count) {
		/* a global lock of a different type exists */
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb_have_extra_locks(tdb)) {
		/* can't combine global and chain locks */
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (upgradable && ltype != F_RDLCK) {
		/* tdb error: you can't upgrade a write lock! */
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	return 1;
}
/* We only need to lock individual bytes, but Linux merges consecutive locks
 * so we lock in contiguous ranges. */
static int tdb_chainlock_gradual(struct tdb_context *tdb,
				 int ltype, enum tdb_lock_flags flags,
				 size_t off, size_t len)
{
	int ret;
	enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);

	if (len <= 4) {
		/* Single record.  Just do blocking lock. */
		return tdb_brlock(tdb, ltype, off, len, flags);
	}

	/* First we try non-blocking. */
	ret = tdb_brlock(tdb, ltype, off, len, nb_flags);
	if (ret == 0) {
		return 0;
	}

	/* Try locking first half, then second. */
	ret = tdb_chainlock_gradual(tdb, ltype, flags, off, len / 2);
	if (ret == -1)
		return -1;

	ret = tdb_chainlock_gradual(tdb, ltype, flags,
				    off + len / 2, len - len / 2);
	if (ret == -1) {
		tdb_brunlock(tdb, ltype, off, len / 2);
		return -1;
	}
	return 0;
}
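/*
 * Worked example (illustrative): for a region covering 4 chains,
 * len == 16.  The whole 16-byte range is tried non-blocking first; if
 * another process holds part of it, the range is split into 8 + 8,
 * then 4 + 4, and each 4-byte piece (a single chain lock) is finally
 * taken with a blocking lock.  Keeping the pieces already acquired
 * while blocking on only one chain at a time is what stops a steady
 * stream of small chain locks from starving the all-record lock.
 */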
/* lock/unlock entire database. It can only be upgradable if you have some
 * other way of guaranteeing exclusivity (i.e. transaction write lock).
 * We do the locking gradually to avoid being starved by smaller locks. */
int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
		       enum tdb_lock_flags flags, bool upgradable)
{
	switch (tdb_allrecord_check(tdb, ltype, flags, upgradable)) {
	case -1:
		return -1;
	case 0:
		return 0;
	}

	/* We cover two kinds of locks:
	 * 1) Normal chain locks.  Taken for almost all operations.
	 * 2) Individual record locks.  Taken after normal or free
	 *    chain locks.
	 *
	 * It is (1) which causes the starvation problem, so we're only
	 * gradual for that. */
	if (tdb_chainlock_gradual(tdb, ltype, flags, FREELIST_TOP,
				  tdb->header.hash_size * 4) == -1) {
		return -1;
	}

	/* Grab individual record locks. */
	if (tdb_brlock(tdb, ltype, lock_offset(tdb->header.hash_size), 0,
		       flags) == -1) {
		tdb_brunlock(tdb, ltype, FREELIST_TOP,
			     tdb->header.hash_size * 4);
		return -1;
	}

	tdb->allrecord_lock.count = 1;
	/* If it's upgradable, it's actually exclusive so we can treat
	 * it as a write lock. */
	tdb->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
	tdb->allrecord_lock.off = upgradable;

	if (tdb_needs_recovery(tdb)) {
		bool mark = flags & TDB_LOCK_MARK_ONLY;
		tdb_allrecord_unlock(tdb, ltype, mark);
		if (mark) {
			tdb->ecode = TDB_ERR_LOCK;
			TDB_LOG((tdb, TDB_DEBUG_ERROR,
				 "tdb_lockall_mark cannot do recovery\n"));
			return -1;
		}
		if (tdb_lock_and_recover(tdb) == -1) {
			return -1;
		}
		return tdb_allrecord_lock(tdb, ltype, flags, upgradable);
	}

	return 0;
}
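/*
 * Byte ranges covered by the all-record lock (illustrative):
 *
 *   [FREELIST_TOP, FREELIST_TOP + 4*hash_size)   chain locks, taken
 *                                                gradually above
 *   [lock_offset(hash_size), end of file)        individual record
 *                                                locks, taken in one go
 *
 * The two ranges are contiguous, which is why tdb_allrecord_unlock()
 * can drop both with a single tdb_brunlock(tdb, ltype, FREELIST_TOP, 0).
 */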
/* unlock entire db */
int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype, bool mark_lock)
{
	/* There are no locks on read-only dbs */
	if (tdb->read_only || tdb->traverse_read) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb->allrecord_lock.count == 0) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	/* Upgradable locks are marked as write locks. */
	if (tdb->allrecord_lock.ltype != ltype
	    && (!tdb->allrecord_lock.off || ltype != F_RDLCK)) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb->allrecord_lock.count > 1) {
		tdb->allrecord_lock.count--;
		return 0;
	}

	if (!mark_lock && tdb_brunlock(tdb, ltype, FREELIST_TOP, 0)) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlockall failed (%s)\n", strerror(errno)));
		return -1;
	}

	tdb->allrecord_lock.count = 0;
	tdb->allrecord_lock.ltype = 0;

	return 0;
}
/* lock entire database with write lock */
int tdb_lockall(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall");
	return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
}

/* lock entire database with write lock - mark only */
int tdb_lockall_mark(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall_mark");
	return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_MARK_ONLY, false);
}

/* unlock entire database with write lock - unmark only */
int tdb_lockall_unmark(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall_unmark");
	return tdb_allrecord_unlock(tdb, F_WRLCK, true);
}

/* lock entire database with write lock - nonblocking variant */
int tdb_lockall_nonblock(struct tdb_context *tdb)
{
	int ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_NOWAIT, false);
	tdb_trace_ret(tdb, "tdb_lockall_nonblock", ret);
	return ret;
}

/* unlock entire database with write lock */
int tdb_unlockall(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_unlockall");
	return tdb_allrecord_unlock(tdb, F_WRLCK, false);
}

/* lock entire database with read lock */
int tdb_lockall_read(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall_read");
	return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
}

/* lock entire database with read lock - nonblocking variant */
int tdb_lockall_read_nonblock(struct tdb_context *tdb)
{
	int ret = tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_NOWAIT, false);
	tdb_trace_ret(tdb, "tdb_lockall_read_nonblock", ret);
	return ret;
}

/* unlock entire database with read lock */
int tdb_unlockall_read(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_unlockall_read");
	return tdb_allrecord_unlock(tdb, F_RDLCK, false);
}
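/*
 * Usage sketch (illustrative only): the whole-database lock calls are
 * meant to be paired, e.g.
 *
 *   if (tdb_lockall(tdb) == 0) {
 *           ... operate on the database exclusively ...
 *           tdb_unlockall(tdb);
 *   }
 *
 * tdb_lockall_mark()/tdb_lockall_unmark() only update the in-memory
 * bookkeeping (TDB_LOCK_MARK_ONLY skips the fcntl call), so they are
 * only appropriate when the caller already knows the byte ranges are
 * locked on its behalf.
 */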
/* lock/unlock one hash chain. This is meant to be used to reduce
   contention - it cannot guarantee how many records will be locked */
int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
{
	int ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
	tdb_trace_1rec(tdb, "tdb_chainlock", key);
	return ret;
}

/* lock/unlock one hash chain, non-blocking. This is meant to be used
   to reduce contention - it cannot guarantee how many records will be
   locked */
int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key)
{
	int ret = tdb_lock_nonblock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
	tdb_trace_1rec_ret(tdb, "tdb_chainlock_nonblock", key, ret);
	return ret;
}

/* mark a chain as locked without actually locking it. Warning! use with great caution! */
int tdb_chainlock_mark(struct tdb_context *tdb, TDB_DATA key)
{
	int ret = tdb_nest_lock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))),
				F_WRLCK, TDB_LOCK_MARK_ONLY);
	tdb_trace_1rec(tdb, "tdb_chainlock_mark", key);
	return ret;
}

/* unmark a chain as locked without actually locking it. Warning! use with great caution! */
int tdb_chainlock_unmark(struct tdb_context *tdb, TDB_DATA key)
{
	tdb_trace_1rec(tdb, "tdb_chainlock_unmark", key);
	return tdb_nest_unlock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))),
			       F_WRLCK, true);
}

int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
{
	tdb_trace_1rec(tdb, "tdb_chainunlock", key);
	return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
}

int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key)
{
	int ret;

	ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
	tdb_trace_1rec(tdb, "tdb_chainlock_read", key);
	return ret;
}

int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key)
{
	tdb_trace_1rec(tdb, "tdb_chainunlock_read", key);
	return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
}
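/*
 * Usage sketch (illustrative only): a chain lock protects every record
 * that happens to hash into the same bucket as the key, e.g.
 *
 *   if (tdb_chainlock(tdb, key) == 0) {
 *           TDB_DATA val = tdb_fetch(tdb, key);
 *           ... modify val and tdb_store() under the chain lock ...
 *           tdb_chainunlock(tdb, key);
 *   }
 *
 * Use the _read variants for read-only critical sections.
 */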
/* record lock stops delete underneath */
int tdb_lock_record(struct tdb_context *tdb, tdb_off_t off)
{
	if (tdb->allrecord_lock.count) {
		return 0;
	}
	return off ? tdb_brlock(tdb, F_RDLCK, off, 1, TDB_LOCK_WAIT) : 0;
}
/*
  Write locks override our own fcntl readlocks, so check it here.
  Note this is meant to be F_SETLK, *not* F_SETLKW, as it's not
  an error to fail to get the lock here.
*/
int tdb_write_lock_record(struct tdb_context *tdb, tdb_off_t off)
{
	struct tdb_traverse_lock *i;

	for (i = &tdb->travlocks; i; i = i->next)
		if (i->off == off)
			return -1;
	if (tdb->allrecord_lock.count) {
		if (tdb->allrecord_lock.ltype == F_WRLCK) {
			return 0;
		}
		return -1;
	}
	return tdb_brlock(tdb, F_WRLCK, off, 1, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
}
int tdb_write_unlock_record(struct tdb_context *tdb, tdb_off_t off)
{
	if (tdb->allrecord_lock.count) {
		return 0;
	}
	return tdb_brunlock(tdb, F_WRLCK, off, 1);
}
/* fcntl locks don't stack: avoid unlocking someone else's */
int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off)
{
	struct tdb_traverse_lock *i;
	uint32_t count = 0;

	if (tdb->allrecord_lock.count) {
		return 0;
	}

	if (off == 0)
		return 0;
	for (i = &tdb->travlocks; i; i = i->next)
		if (i->off == off)
			count++;
	return (count == 1 ? tdb_brunlock(tdb, F_RDLCK, off, 1) : 0);
}
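/*
 * Note on tdb_unlock_record() above: the count includes the caller's
 * own traverse lock, so the byte is only unlocked in the kernel when
 * exactly one traverse lock structure still references this offset.
 * fcntl read locks taken by the same process do not stack, so unlocking
 * earlier would also drop a lock another traverse still relies on.
 */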
bool tdb_have_extra_locks(struct tdb_context *tdb)
{
	unsigned int extra = tdb->num_lockrecs;

	/* A transaction holds the lock for all records. */
	if (!tdb->transaction && tdb->allrecord_lock.count) {
		return true;
	}

	/* We always hold the active lock if CLEAR_IF_FIRST. */
	if (find_nestlock(tdb, ACTIVE_LOCK)) {
		extra--;
	}

	/* In a transaction, we expect to hold the transaction lock */
	if (tdb->transaction && find_nestlock(tdb, TRANSACTION_LOCK)) {
		extra--;
	}

	return extra;
}
/* The transaction code uses this to remove all locks.  Note that this
   may include OPEN_LOCK. */
void tdb_release_extra_locks(struct tdb_context *tdb)
{
	unsigned int i, extra = 0;

	if (tdb->allrecord_lock.count != 0) {
		tdb_brunlock(tdb, tdb->allrecord_lock.ltype, FREELIST_TOP, 0);
		tdb->allrecord_lock.count = 0;
	}

	for (i=0;i<tdb->num_lockrecs;i++) {
		struct tdb_lock_type *lck = &tdb->lockrecs[i];

		/* Don't release transaction or active locks! */
		if (tdb->transaction && lck->off == TRANSACTION_LOCK) {
			tdb->lockrecs[extra++] = *lck;
		} else if (lck->off == ACTIVE_LOCK) {
			tdb->lockrecs[extra++] = *lck;
		} else {
			tdb_brunlock(tdb, lck->ltype, lck->off, 1);
		}
	}
	tdb->num_lockrecs = extra;
	if (tdb->num_lockrecs == 0) {
		SAFE_FREE(tdb->lockrecs);
	}
}