diff --git a/ccan/tdb2/lock.c b/ccan/tdb2/lock.c
index 50db470c9c817e0c02f37b3e3b8ae950c3710cd4..17e80d70caef02610af5fc76efcefc1556f614c6 100644
--- a/ccan/tdb2/lock.c
+++ b/ccan/tdb2/lock.c
 #include <assert.h>
 #include <ccan/build_assert/build_assert.h>
 
+/* If we were threaded, we could wait for unlock, but we're not, so fail. */
+static enum TDB_ERROR owner_conflict(struct tdb_context *tdb, const char *call)
+{
+       return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
+                         "%s: lock owned by another tdb in this process.",
+                         call);
+}
+
 static int fcntl_lock(struct tdb_context *tdb,
                      int rw, off_t off, off_t len, bool waitflag)
 {
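
The new owner_conflict() helper exists because, as its "lock owned by another tdb in this process" message says, several tdb_context handles in one process can now share the same lock state (the struct tdb_file the rest of this patch switches to), and each lock record remembers which handle took it. A minimal sketch of the situation it reports; this assumes tdb2's usual tdb_open()/tdb_close() entry points, that both opens end up sharing one struct tdb_file, and that the lock helpers and TDB_LOCK_WAIT come from tdb2's private header, so it is an illustration rather than part of the patch:

#include <fcntl.h>
#include <stdbool.h>
#include <ccan/tdb2/tdb2.h>
/* tdb_allrecord_lock() and friends are internal to tdb2; their
 * declarations and the tdb_lock_flags values live in its private header. */

static void owner_conflict_demo(void)
{
        struct tdb_context *a = tdb_open("demo.tdb", TDB_DEFAULT,
                                         O_RDWR|O_CREAT, 0600, NULL);
        struct tdb_context *b = tdb_open("demo.tdb", TDB_DEFAULT,
                                         O_RDWR, 0, NULL);

        /* a takes the allrecord lock; a and b share one set of lock records. */
        tdb_allrecord_lock(a, F_WRLCK, TDB_LOCK_WAIT, false);

        /* b would otherwise block on its own process's fcntl lock, so the
         * owner check fails fast with TDB_ERR_LOCK via owner_conflict(). */
        if (tdb_allrecord_lock(b, F_WRLCK, TDB_LOCK_WAIT, false) != TDB_SUCCESS) {
                /* back off, or do the work through handle a instead */
        }

        tdb_allrecord_unlock(a, F_WRLCK);
        tdb_close(b);
        tdb_close(a);
}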
@@ -42,10 +50,10 @@ static int fcntl_lock(struct tdb_context *tdb,
 
        add_stat(tdb, lock_lowlevel, 1);
        if (waitflag)
-               return fcntl(tdb->fd, F_SETLKW, &fl);
+               return fcntl(tdb->file->fd, F_SETLKW, &fl);
        else {
                add_stat(tdb, lock_nonblock, 1);
-               return fcntl(tdb->fd, F_SETLK, &fl);
+               return fcntl(tdb->file->fd, F_SETLK, &fl);
        }
 }
 
@@ -116,11 +124,11 @@ static int fcntl_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
        fl.l_len = len;
        fl.l_pid = 0;
 
-       return fcntl(tdb->fd, F_SETLKW, &fl);
+       return fcntl(tdb->file->fd, F_SETLKW, &fl);
 }
 
 /* a byte range locking function - return 0 on success
-   this functions locks/unlocks 1 byte at the specified offset.
+   this function locks len bytes at the specified offset.
 
    note that a len of zero means lock to end of file
 */
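
For reference, this is roughly the fcntl(2) call that tdb_brlock()/tdb_brunlock() reduce to. POSIX treats l_len == 0 as "lock to end of file", which is what lets a single allrecord lock cover a file that later grows. A minimal, self-contained sketch (names are illustrative, not part of the patch):

#include <fcntl.h>
#include <stdbool.h>

/* Lock or unlock [off, off + len) on fd; len == 0 means to end of file. */
static int byte_range_lock(int fd, short type /* F_RDLCK, F_WRLCK or F_UNLCK */,
                           off_t off, off_t len, bool wait)
{
        struct flock fl;

        fl.l_type = type;
        fl.l_whence = SEEK_SET;
        fl.l_start = off;
        fl.l_len = len;
        fl.l_pid = 0;

        return fcntl(fd, wait ? F_SETLKW : F_SETLK, &fl);
}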
@@ -161,7 +169,7 @@ static enum TDB_ERROR tdb_brlock(struct tdb_context *tdb,
                                   "tdb_brlock failed (fd=%d) at"
                                   " offset %zu rw_type=%d flags=%d len=%zu:"
                                   " %s",
-                                  tdb->fd, (size_t)offset, rw_type,
+                                  tdb->file->fd, (size_t)offset, rw_type,
                                   flags, (size_t)len, strerror(errno));
                }
                return TDB_ERR_LOCK;
@@ -186,7 +194,7 @@ static enum TDB_ERROR tdb_brunlock(struct tdb_context *tdb,
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_brunlock failed (fd=%d) at offset %zu"
                                  " rw_type=%d len=%zu",
-                                 tdb->fd, (size_t)offset, rw_type,
+                                 tdb->file->fd, (size_t)offset, rw_type,
                                  (size_t)len);
        }
        return TDB_SUCCESS;
@@ -198,21 +206,25 @@ static enum TDB_ERROR tdb_brunlock(struct tdb_context *tdb,
   deadlock detection and claim a deadlock when progress can be
   made. For those OSes we may loop for a while.
 */
-int tdb_allrecord_upgrade(struct tdb_context *tdb)
+enum TDB_ERROR tdb_allrecord_upgrade(struct tdb_context *tdb)
 {
        int count = 1000;
 
-       if (tdb->allrecord_lock.count != 1) {
-               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-                          "tdb_allrecord_upgrade failed: count %u too high",
-                          tdb->allrecord_lock.count);
-               return -1;
+       if (tdb->file->allrecord_lock.count != 1) {
+               return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+                                 "tdb_allrecord_upgrade failed:"
+                                 " count %u too high",
+                                 tdb->file->allrecord_lock.count);
        }
 
-       if (tdb->allrecord_lock.off != 1) {
-               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-                          "tdb_allrecord_upgrade failed: already upgraded?");
-               return -1;
+       if (tdb->file->allrecord_lock.off != 1) {
+               return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+                                 "tdb_allrecord_upgrade failed:"
+                                 " already upgraded?");
+       }
+
+       if (tdb->file->allrecord_lock.owner != tdb) {
+               return owner_conflict(tdb, "tdb_allrecord_upgrade");
        }
 
        while (count--) {
@@ -220,9 +232,9 @@ int tdb_allrecord_upgrade(struct tdb_context *tdb)
                if (tdb_brlock(tdb, F_WRLCK,
                               TDB_HASH_LOCK_START, 0,
                               TDB_LOCK_WAIT|TDB_LOCK_PROBE) == TDB_SUCCESS) {
-                       tdb->allrecord_lock.ltype = F_WRLCK;
-                       tdb->allrecord_lock.off = 0;
-                       return 0;
+                       tdb->file->allrecord_lock.ltype = F_WRLCK;
+                       tdb->file->allrecord_lock.off = 0;
+                       return TDB_SUCCESS;
                }
                if (errno != EDEADLK) {
                        break;
@@ -232,148 +244,157 @@ int tdb_allrecord_upgrade(struct tdb_context *tdb)
                tv.tv_usec = 1;
                select(0, NULL, NULL, NULL, &tv);
        }
-       tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-                  "tdb_allrecord_upgrade failed");
-       return -1;
+       return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+                         "tdb_allrecord_upgrade failed");
 }
 
-static struct tdb_lock_type *find_nestlock(struct tdb_context *tdb,
-                                          tdb_off_t offset)
+static struct tdb_lock *find_nestlock(struct tdb_context *tdb, tdb_off_t offset,
+                                     const struct tdb_context *owner)
 {
        unsigned int i;
 
-       for (i=0; i<tdb->num_lockrecs; i++) {
-               if (tdb->lockrecs[i].off == offset) {
-                       return &tdb->lockrecs[i];
+       for (i=0; i<tdb->file->num_lockrecs; i++) {
+               if (tdb->file->lockrecs[i].off == offset) {
+                       if (owner && tdb->file->lockrecs[i].owner != owner)
+                               return NULL;
+                       return &tdb->file->lockrecs[i];
                }
        }
        return NULL;
 }
 
-int tdb_lock_and_recover(struct tdb_context *tdb)
+enum TDB_ERROR tdb_lock_and_recover(struct tdb_context *tdb)
 {
-       int ret;
+       enum TDB_ERROR ecode;
 
-       if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK,
-                              false) == -1) {
-               return -1;
+       ecode = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK,
+                                  false);
+       if (ecode != TDB_SUCCESS) {
+               return ecode;
        }
 
-       if (tdb_lock_open(tdb, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK) == -1) {
+       ecode = tdb_lock_open(tdb, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK);
+       if (ecode != TDB_SUCCESS) {
                tdb_allrecord_unlock(tdb, F_WRLCK);
-               return -1;
+               return ecode;
        }
-       ret = tdb_transaction_recover(tdb);
-
+       ecode = tdb_transaction_recover(tdb);
        tdb_unlock_open(tdb);
        tdb_allrecord_unlock(tdb, F_WRLCK);
 
-       return ret;
+       return ecode;
 }
 
 /* lock an offset in the database. */
-static int tdb_nest_lock(struct tdb_context *tdb, tdb_off_t offset, int ltype,
-                        enum tdb_lock_flags flags)
+static enum TDB_ERROR tdb_nest_lock(struct tdb_context *tdb,
+                                   tdb_off_t offset, int ltype,
+                                   enum tdb_lock_flags flags)
 {
-       struct tdb_lock_type *new_lck;
+       struct tdb_lock *new_lck;
        enum TDB_ERROR ecode;
 
-       if (offset > TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE + tdb->map_size / 8) {
-               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-                          "tdb_nest_lock: invalid offset %zu ltype=%d",
-                          (size_t)offset, ltype);
-               return -1;
+       if (offset > (TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
+                     + tdb->file->map_size / 8)) {
+               return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+                                 "tdb_nest_lock: invalid offset %zu ltype=%d",
+                                 (size_t)offset, ltype);
        }
 
        if (tdb->flags & TDB_NOLOCK)
-               return 0;
+               return TDB_SUCCESS;
 
        add_stat(tdb, locks, 1);
 
-       new_lck = find_nestlock(tdb, offset);
+       new_lck = find_nestlock(tdb, offset, NULL);
        if (new_lck) {
+               if (new_lck->owner != tdb) {
+                       return owner_conflict(tdb, "tdb_nest_lock");
+               }
+
                if (new_lck->ltype == F_RDLCK && ltype == F_WRLCK) {
-                       tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-                                  "tdb_nest_lock: offset %zu has read lock",
-                                  (size_t)offset);
-                       return -1;
+                       return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+                                         "tdb_nest_lock:"
+                                         " offset %zu has read lock",
+                                         (size_t)offset);
                }
                /* Just increment the struct, posix locks don't stack. */
                new_lck->count++;
-               return 0;
+               return TDB_SUCCESS;
        }
 
-       if (tdb->num_lockrecs
+       if (tdb->file->num_lockrecs
            && offset >= TDB_HASH_LOCK_START
            && offset < TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE) {
-               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-                          "tdb_nest_lock: already have a hash lock?");
-               return -1;
+               return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+                                 "tdb_nest_lock: already have a hash lock?");
        }
 
-       new_lck = (struct tdb_lock_type *)realloc(
-               tdb->lockrecs,
-               sizeof(*tdb->lockrecs) * (tdb->num_lockrecs+1));
+       new_lck = (struct tdb_lock *)realloc(
+               tdb->file->lockrecs,
+               sizeof(*tdb->file->lockrecs) * (tdb->file->num_lockrecs+1));
        if (new_lck == NULL) {
-               tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
-                        "tdb_nest_lock: unable to allocate %zu lock struct",
-                        tdb->num_lockrecs + 1);
-               errno = ENOMEM;
-               return -1;
+               return tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
+                                 "tdb_nest_lock:"
+                                 " unable to allocate %zu lock struct",
+                                 tdb->file->num_lockrecs + 1);
        }
-       tdb->lockrecs = new_lck;
+       tdb->file->lockrecs = new_lck;
 
        /* Since fcntl locks don't nest, we do a lock for the first one,
           and simply bump the count for future ones */
        ecode = tdb_brlock(tdb, ltype, offset, 1, flags);
-       if (ecode) {
-               tdb->ecode = ecode;
-               return -1;
+       if (ecode != TDB_SUCCESS) {
+               return ecode;
        }
 
        /* First time we grab a lock, perhaps someone died in commit? */
        if (!(flags & TDB_LOCK_NOCHECK)
-           && tdb->num_lockrecs == 0
-           && unlikely(tdb_needs_recovery(tdb))) {
-               tdb_brunlock(tdb, ltype, offset, 1);
-
-               if (tdb_lock_and_recover(tdb) == -1) {
-                       return -1;
-               }
-
-               ecode = tdb_brlock(tdb, ltype, offset, 1, flags);
-               if (ecode != TDB_SUCCESS) {
-                       tdb->ecode = ecode;
-                       return -1;
+           && tdb->file->num_lockrecs == 0) {
+               tdb_bool_err berr = tdb_needs_recovery(tdb);
+               if (berr != false) {
+                       tdb_brunlock(tdb, ltype, offset, 1);
+
+                       if (berr < 0)
+                               return berr;
+                       ecode = tdb_lock_and_recover(tdb);
+                       if (ecode == TDB_SUCCESS) {
+                               ecode = tdb_brlock(tdb, ltype, offset, 1,
+                                                  flags);
+                       }
+                       if (ecode != TDB_SUCCESS) {
+                               return ecode;
+                       }
                }
        }
 
-       tdb->lockrecs[tdb->num_lockrecs].off = offset;
-       tdb->lockrecs[tdb->num_lockrecs].count = 1;
-       tdb->lockrecs[tdb->num_lockrecs].ltype = ltype;
-       tdb->num_lockrecs++;
+       tdb->file->lockrecs[tdb->file->num_lockrecs].owner = tdb;
+       tdb->file->lockrecs[tdb->file->num_lockrecs].off = offset;
+       tdb->file->lockrecs[tdb->file->num_lockrecs].count = 1;
+       tdb->file->lockrecs[tdb->file->num_lockrecs].ltype = ltype;
+       tdb->file->num_lockrecs++;
 
-       return 0;
+       return TDB_SUCCESS;
 }
 
-static int tdb_nest_unlock(struct tdb_context *tdb, tdb_off_t off, int ltype)
+static enum TDB_ERROR tdb_nest_unlock(struct tdb_context *tdb,
+                                     tdb_off_t off, int ltype)
 {
-       struct tdb_lock_type *lck;
+       struct tdb_lock *lck;
        enum TDB_ERROR ecode;
 
        if (tdb->flags & TDB_NOLOCK)
-               return 0;
+               return TDB_SUCCESS;
 
-       lck = find_nestlock(tdb, off);
+       lck = find_nestlock(tdb, off, tdb);
        if ((lck == NULL) || (lck->count == 0)) {
-               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-                          "tdb_nest_unlock: no lock for %zu", (size_t)off);
-               return -1;
+               return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+                                 "tdb_nest_unlock: no lock for %zu",
+                                 (size_t)off);
        }
 
        if (lck->count > 1) {
                lck->count--;
-               return 0;
+               return TDB_SUCCESS;
        }
 
        /*
@@ -388,19 +409,15 @@ static int tdb_nest_unlock(struct tdb_context *tdb, tdb_off_t off, int ltype)
         * Shrink the array by overwriting the element just unlocked with the
         * last array element.
         */
-       *lck = tdb->lockrecs[--tdb->num_lockrecs];
+       *lck = tdb->file->lockrecs[--tdb->file->num_lockrecs];
 
-       if (ecode != TDB_SUCCESS) {
-               tdb->ecode = ecode;
-               return -1;
-       }
-       return 0;
+       return ecode;
 }
 
 /*
   get the transaction lock
  */
-int tdb_transaction_lock(struct tdb_context *tdb, int ltype)
+enum TDB_ERROR tdb_transaction_lock(struct tdb_context *tdb, int ltype)
 {
        return tdb_nest_lock(tdb, TDB_TRANSACTION_LOCK, ltype, TDB_LOCK_WAIT);
 }
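
tdb_transaction_lock() above is just a nest lock on the TDB_TRANSACTION_LOCK offset, and the comment in the hunks above states the key rule: fcntl locks do not stack, so only the first acquisition touches fcntl() and later ones merely bump lockrecs[].count, with tdb_nest_unlock() undoing that in reverse. A sketch of the visible effect through the expansion-lock wrappers (illustration only; same-handle locking and the usual tdb2 headers assumed):

static void expansion_lock_nesting(struct tdb_context *tdb)
{
        if (tdb_lock_expand(tdb, F_WRLCK) != TDB_SUCCESS)   /* fcntl lock, count == 1 */
                return;

        /* Same handle, same offset: no second fcntl call, just count == 2. */
        if (tdb_lock_expand(tdb, F_WRLCK) == TDB_SUCCESS)
                tdb_unlock_expand(tdb, F_WRLCK);            /* count back to 1 */

        tdb_unlock_expand(tdb, F_WRLCK);                    /* count == 0, fcntl unlock */
}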
@@ -408,9 +425,9 @@ int tdb_transaction_lock(struct tdb_context *tdb, int ltype)
 /*
   release the transaction lock
  */
-int tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
+void tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
 {
-       return tdb_nest_unlock(tdb, TDB_TRANSACTION_LOCK, ltype);
+       tdb_nest_unlock(tdb, TDB_TRANSACTION_LOCK, ltype);
 }
 
 /* We only need to lock individual bytes, but Linux merges consecutive locks
@@ -449,45 +466,42 @@ static enum TDB_ERROR tdb_lock_gradual(struct tdb_context *tdb,
 
 /* lock/unlock entire database.  It can only be upgradable if you have some
  * other way of guaranteeing exclusivity (ie. transaction write lock). */
-int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
-                      enum tdb_lock_flags flags, bool upgradable)
+enum TDB_ERROR tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
+                                 enum tdb_lock_flags flags, bool upgradable)
 {
        enum TDB_ERROR ecode;
+       tdb_bool_err berr;
 
-       /* FIXME: There are no locks on read-only dbs */
-       if (tdb->read_only) {
-               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
-                          "tdb_allrecord_lock: read-only");
-               return -1;
-       }
+       if (tdb->file->allrecord_lock.count) {
+               if (tdb->file->allrecord_lock.owner != tdb) {
+                       return owner_conflict(tdb, "tdb_allrecord_lock");
+               }
 
-       if (tdb->allrecord_lock.count
-           && (ltype == F_RDLCK || tdb->allrecord_lock.ltype == F_WRLCK)) {
-               tdb->allrecord_lock.count++;
-               return 0;
-       }
+               if (ltype == F_RDLCK
+                   || tdb->file->allrecord_lock.ltype == F_WRLCK) {
+                       tdb->file->allrecord_lock.count++;
+                       return TDB_SUCCESS;
+               }
 
-       if (tdb->allrecord_lock.count) {
                /* a global lock of a different type exists */
-               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
-                          "tdb_allrecord_lock: already have %s lock",
-                          tdb->allrecord_lock.ltype == F_RDLCK
-                          ? "read" : "write");
-               return -1;
+               return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
+                                 "tdb_allrecord_lock: already have %s lock",
+                                 tdb->file->allrecord_lock.ltype == F_RDLCK
+                                 ? "read" : "write");
        }
 
        if (tdb_has_hash_locks(tdb)) {
                /* can't combine global and chain locks */
-               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
-                        "tdb_allrecord_lock: already have chain lock");
-               return -1;
+               return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
+                                 "tdb_allrecord_lock:"
+                                 " already have chain lock");
        }
 
        if (upgradable && ltype != F_RDLCK) {
                /* tdb error: you can't upgrade a write lock! */
-               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-                          "tdb_allrecord_lock: can't upgrade a write lock");
-               return -1;
+               return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+                                 "tdb_allrecord_lock:"
+                                 " can't upgrade a write lock");
        }
 
        add_stat(tdb, locks, 1);
@@ -496,12 +510,11 @@ again:
        ecode = tdb_lock_gradual(tdb, ltype, flags, TDB_HASH_LOCK_START,
                                 TDB_HASH_LOCK_RANGE);
        if (ecode != TDB_SUCCESS) {
-               tdb->ecode = ecode;
                if (!(flags & TDB_LOCK_PROBE)) {
-                       tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
+                       tdb_logerr(tdb, ecode, TDB_LOG_ERROR,
                                   "tdb_allrecord_lock hashes failed");
                }
-               return -1;
+               return ecode;
        }
 
        /* Lock free tables: there to end of file. */
@@ -509,35 +522,41 @@ again:
                           TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE,
                           0, flags);
        if (ecode != TDB_SUCCESS) {
-               tdb->ecode = ecode;
                if (!(flags & TDB_LOCK_PROBE)) {
-                       tdb_logerr(tdb, tdb->ecode, TDB_LOG_ERROR,
+                       tdb_logerr(tdb, ecode, TDB_LOG_ERROR,
                                 "tdb_allrecord_lock freetables failed");
                }
                tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START,
                             TDB_HASH_LOCK_RANGE);
-               return -1;
+               return ecode;
        }
 
-       tdb->allrecord_lock.count = 1;
+       tdb->file->allrecord_lock.owner = tdb;
+       tdb->file->allrecord_lock.count = 1;
        /* If it's upgradable, it's actually exclusive so we can treat
         * it as a write lock. */
-       tdb->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
-       tdb->allrecord_lock.off = upgradable;
+       tdb->file->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
+       tdb->file->allrecord_lock.off = upgradable;
 
        /* Now check for needing recovery. */
-       if (!(flags & TDB_LOCK_NOCHECK) && unlikely(tdb_needs_recovery(tdb))) {
-               tdb_allrecord_unlock(tdb, ltype);
-               if (tdb_lock_and_recover(tdb) == -1) {
-                       return -1;
-               }
-               goto again;
-       }
+       if (flags & TDB_LOCK_NOCHECK)
+               return TDB_SUCCESS;
+
+       berr = tdb_needs_recovery(tdb);
+       if (likely(berr == false))
+               return TDB_SUCCESS;
 
-       return 0;
+       tdb_allrecord_unlock(tdb, ltype);
+       if (berr < 0)
+               return berr;
+       ecode = tdb_lock_and_recover(tdb);
+       if (ecode != TDB_SUCCESS) {
+               return ecode;
+       }
+       goto again;
 }
 
-int tdb_lock_open(struct tdb_context *tdb, enum tdb_lock_flags flags)
+enum TDB_ERROR tdb_lock_open(struct tdb_context *tdb, enum tdb_lock_flags flags)
 {
        return tdb_nest_lock(tdb, TDB_OPEN_LOCK, F_WRLCK, flags);
 }
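
The other change running through these hunks is the calling convention: the locking entry points now return an enum TDB_ERROR directly instead of -1 plus a side effect on tdb->ecode, and unlock paths whose failure the caller cannot act on become void. A hedged sketch of the resulting caller pattern (hypothetical caller; these are tdb2-internal functions):

static enum TDB_ERROR read_everything(struct tdb_context *tdb)
{
        enum TDB_ERROR ecode;

        ecode = tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
        if (ecode != TDB_SUCCESS)
                return ecode;   /* propagate the code; no tdb->ecode dance */

        /* ... walk the database under the read lock ... */

        tdb_allrecord_unlock(tdb, F_RDLCK);     /* now returns void */
        return TDB_SUCCESS;
}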
@@ -549,10 +568,11 @@ void tdb_unlock_open(struct tdb_context *tdb)
 
 bool tdb_has_open_lock(struct tdb_context *tdb)
 {
-       return find_nestlock(tdb, TDB_OPEN_LOCK) != NULL;
+       return !(tdb->flags & TDB_NOLOCK)
+               && find_nestlock(tdb, TDB_OPEN_LOCK, tdb) != NULL;
 }
 
-int tdb_lock_expand(struct tdb_context *tdb, int ltype)
+enum TDB_ERROR tdb_lock_expand(struct tdb_context *tdb, int ltype)
 {
        /* Lock doesn't protect data, so don't check (we recurse if we do!) */
        return tdb_nest_lock(tdb, TDB_EXPANSION_LOCK, ltype,
@@ -565,48 +585,54 @@ void tdb_unlock_expand(struct tdb_context *tdb, int ltype)
 }
 
 /* unlock entire db */
-int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype)
+void tdb_allrecord_unlock(struct tdb_context *tdb, int ltype)
 {
-       if (tdb->allrecord_lock.count == 0) {
+       if (tdb->file->allrecord_lock.count == 0) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "tdb_allrecord_unlock: not locked!");
-               return -1;
+               return;
+       }
+
+       if (tdb->file->allrecord_lock.owner != tdb) {
+               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
+                          "tdb_allrecord_unlock: not locked by us!");
+               return;
        }
 
        /* Upgradable locks are marked as write locks. */
-       if (tdb->allrecord_lock.ltype != ltype
-           && (!tdb->allrecord_lock.off || ltype != F_RDLCK)) {
+       if (tdb->file->allrecord_lock.ltype != ltype
+           && (!tdb->file->allrecord_lock.off || ltype != F_RDLCK)) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-                        "tdb_allrecord_unlock: have %s lock",
-                          tdb->allrecord_lock.ltype == F_RDLCK
+                          "tdb_allrecord_unlock: have %s lock",
+                          tdb->file->allrecord_lock.ltype == F_RDLCK
                           ? "read" : "write");
-               return -1;
+               return;
        }
 
-       if (tdb->allrecord_lock.count > 1) {
-               tdb->allrecord_lock.count--;
-               return 0;
+       if (tdb->file->allrecord_lock.count > 1) {
+               tdb->file->allrecord_lock.count--;
+               return;
        }
 
-       tdb->allrecord_lock.count = 0;
-       tdb->allrecord_lock.ltype = 0;
+       tdb->file->allrecord_lock.count = 0;
+       tdb->file->allrecord_lock.ltype = 0;
 
-       return tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START, 0);
+       tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START, 0);
 }
 
 bool tdb_has_expansion_lock(struct tdb_context *tdb)
 {
-       return find_nestlock(tdb, TDB_EXPANSION_LOCK) != NULL;
+       return find_nestlock(tdb, TDB_EXPANSION_LOCK, tdb) != NULL;
 }
 
 bool tdb_has_hash_locks(struct tdb_context *tdb)
 {
        unsigned int i;
 
-       for (i=0; i<tdb->num_lockrecs; i++) {
-               if (tdb->lockrecs[i].off >= TDB_HASH_LOCK_START
-                   && tdb->lockrecs[i].off < (TDB_HASH_LOCK_START
-                                              + TDB_HASH_LOCK_RANGE))
+       for (i=0; i<tdb->file->num_lockrecs; i++) {
+               if (tdb->file->lockrecs[i].off >= TDB_HASH_LOCK_START
+                   && tdb->file->lockrecs[i].off < (TDB_HASH_LOCK_START
+                                                    + TDB_HASH_LOCK_RANGE))
                        return true;
        }
        return false;
@@ -616,68 +642,74 @@ static bool tdb_has_free_lock(struct tdb_context *tdb)
 {
        unsigned int i;
 
-       for (i=0; i<tdb->num_lockrecs; i++) {
-               if (tdb->lockrecs[i].off
+       if (tdb->flags & TDB_NOLOCK)
+               return false;
+
+       for (i=0; i<tdb->file->num_lockrecs; i++) {
+               if (tdb->file->lockrecs[i].off
                    > TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE)
                        return true;
        }
        return false;
 }
 
-int tdb_lock_hashes(struct tdb_context *tdb,
-                   tdb_off_t hash_lock,
-                   tdb_len_t hash_range,
-                   int ltype, enum tdb_lock_flags waitflag)
+enum TDB_ERROR tdb_lock_hashes(struct tdb_context *tdb,
+                              tdb_off_t hash_lock,
+                              tdb_len_t hash_range,
+                              int ltype, enum tdb_lock_flags waitflag)
 {
        /* FIXME: Do this properly, using hlock_range */
        unsigned lock = TDB_HASH_LOCK_START
                + (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));
 
        /* a allrecord lock allows us to avoid per chain locks */
-       if (tdb->allrecord_lock.count &&
-           (ltype == tdb->allrecord_lock.ltype || ltype == F_RDLCK)) {
-               return 0;
-       }
+       if (tdb->file->allrecord_lock.count) {
+               if (tdb->file->allrecord_lock.owner != tdb)
+                       return owner_conflict(tdb, "tdb_lock_hashes");
+               if (ltype == tdb->file->allrecord_lock.ltype
+                   || ltype == F_RDLCK) {
+                       return TDB_SUCCESS;
+               }
 
-       if (tdb->allrecord_lock.count) {
-               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
-                          "tdb_lock_hashes: already have %s allrecordlock",
-                          tdb->allrecord_lock.ltype == F_RDLCK
-                          ? "read" : "write");
-               return -1;
+               return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
+                                 "tdb_lock_hashes:"
+                                 " already have %s allrecordlock",
+                                 tdb->file->allrecord_lock.ltype == F_RDLCK
+                                 ? "read" : "write");
        }
 
        if (tdb_has_free_lock(tdb)) {
-               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-                          "tdb_lock_hashes: already have free lock");
-               return -1;
+               return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+                                 "tdb_lock_hashes: already have free lock");
        }
 
        if (tdb_has_expansion_lock(tdb)) {
-               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-                          "tdb_lock_hashes: already have expansion lock");
-               return -1;
+               return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+                                 "tdb_lock_hashes:"
+                                 " already have expansion lock");
        }
 
        return tdb_nest_lock(tdb, lock, ltype, waitflag);
 }
 
-int tdb_unlock_hashes(struct tdb_context *tdb,
-                     tdb_off_t hash_lock,
-                     tdb_len_t hash_range, int ltype)
+enum TDB_ERROR tdb_unlock_hashes(struct tdb_context *tdb,
+                                tdb_off_t hash_lock,
+                                tdb_len_t hash_range, int ltype)
 {
        unsigned lock = TDB_HASH_LOCK_START
                + (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));
 
+       if (tdb->flags & TDB_NOLOCK)
+               return 0;
+
        /* a allrecord lock allows us to avoid per chain locks */
-       if (tdb->allrecord_lock.count) {
-               if (tdb->allrecord_lock.ltype == F_RDLCK
+       if (tdb->file->allrecord_lock.count) {
+               if (tdb->file->allrecord_lock.ltype == F_RDLCK
                    && ltype == F_WRLCK) {
-                       tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-                                  "tdb_unlock_hashes RO allrecord!");
-                       return -1;
+                       return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+                                         "tdb_unlock_hashes RO allrecord!");
                }
-               return 0;
+               return TDB_SUCCESS;
        }
 
        return tdb_nest_unlock(tdb, lock, ltype);
@@ -694,25 +726,28 @@ static tdb_off_t free_lock_off(tdb_off_t b_off)
                + b_off / sizeof(tdb_off_t);
 }
 
-int tdb_lock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off,
-                        enum tdb_lock_flags waitflag)
+enum TDB_ERROR tdb_lock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off,
+                                   enum tdb_lock_flags waitflag)
 {
        assert(b_off >= sizeof(struct tdb_header));
 
+       if (tdb->flags & TDB_NOLOCK)
+               return 0;
+
        /* a allrecord lock allows us to avoid per chain locks */
-       if (tdb->allrecord_lock.count) {
-               if (tdb->allrecord_lock.ltype == F_WRLCK)
+       if (tdb->file->allrecord_lock.count) {
+               if (tdb->file->allrecord_lock.ltype == F_WRLCK)
                        return 0;
-               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-                        "tdb_lock_free_bucket with RO allrecordlock!");
-               return -1;
+               return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+                                 "tdb_lock_free_bucket with"
+                                 " read-only allrecordlock!");
        }
 
 #if 0 /* FIXME */
        if (tdb_has_expansion_lock(tdb)) {
-               tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-                          "tdb_lock_free_bucket: already have expansion lock");
-               return -1;
+               return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+                                 "tdb_lock_free_bucket:"
+                                 " already have expansion lock");
        }
 #endif
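
Free-list buckets follow the same pattern as hash chains: a write allrecord lock held by this handle already covers them, and otherwise each bucket gets its own one-byte lock above the hash-lock range via free_lock_off(). A sketch of the expected usage, assuming b_off is a valid free-table bucket offset:

static enum TDB_ERROR adjust_free_bucket(struct tdb_context *tdb,
                                         tdb_off_t b_off)
{
        enum TDB_ERROR ecode;

        ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
        if (ecode != TDB_SUCCESS)
                return ecode;

        /* ... add or remove a free record on this bucket's list ... */

        tdb_unlock_free_bucket(tdb, b_off);
        return TDB_SUCCESS;
}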
 
@@ -721,15 +756,27 @@ int tdb_lock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off,
 
 void tdb_unlock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off)
 {
-       if (tdb->allrecord_lock.count)
+       if (tdb->file->allrecord_lock.count)
                return;
 
        tdb_nest_unlock(tdb, free_lock_off(b_off), F_WRLCK);
 }
 
-void tdb_lock_init(struct tdb_context *tdb)
+void tdb_unlock_all(struct tdb_context *tdb)
 {
-       tdb->num_lockrecs = 0;
-       tdb->lockrecs = NULL;
-       tdb->allrecord_lock.count = 0;
+       unsigned int i;
+
+       while (tdb->file->allrecord_lock.count
+              && tdb->file->allrecord_lock.owner == tdb) {
+               tdb_allrecord_unlock(tdb, tdb->file->allrecord_lock.ltype);
+       }
+
+       for (i=0; i<tdb->file->num_lockrecs; i++) {
+               if (tdb->file->lockrecs[i].owner == tdb) {
+                       tdb_nest_unlock(tdb,
+                                       tdb->file->lockrecs[i].off,
+                                       tdb->file->lockrecs[i].ltype);
+                       i--;
+               }
+       }
 }