X-Git-Url: https://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Ftdb2%2Ftdb1_lock.c;h=85348df04dc67c0a4e2504127bf4b7ccd1e2a2b1;hp=68f394aad24c8b2ddeb1b4557e26fd1247d7b101;hb=60210a73ec08a7b34ba637ad19e6749cf6dc1952;hpb=919937354a331bb964564a11b5a5b80403ff8db9;ds=sidebyside

diff --git a/ccan/tdb2/tdb1_lock.c b/ccan/tdb2/tdb1_lock.c
index 68f394aa..85348df0 100644
--- a/ccan/tdb2/tdb1_lock.c
+++ b/ccan/tdb2/tdb1_lock.c
@@ -39,9 +39,9 @@ static int fcntl_lock(struct tdb1_context *tdb,
 	fl.l_pid = 0;
 
 	if (waitflag)
-		return fcntl(tdb->fd, F_SETLKW, &fl);
+		return fcntl(tdb->file->fd, F_SETLKW, &fl);
 	else
-		return fcntl(tdb->fd, F_SETLK, &fl);
+		return fcntl(tdb->file->fd, F_SETLK, &fl);
 }
 
 static int fcntl_unlock(struct tdb1_context *tdb, int rw, off_t off, off_t len)
@@ -111,7 +111,7 @@ static int fcntl_unlock(struct tdb1_context *tdb, int rw, off_t off, off_t len)
 	fl.l_len = len;
 	fl.l_pid = 0;
 
-	return fcntl(tdb->fd, F_SETLKW, &fl);
+	return fcntl(tdb->file->fd, F_SETLKW, &fl);
 }
 
 /* list -1 is the alloc list, otherwise a hash chain. */
@@ -130,11 +130,11 @@ static tdb1_off_t lock_offset(int list)
 */
 int tdb1_brlock(struct tdb1_context *tdb,
 		int rw_type, tdb1_off_t offset, size_t len,
-		enum tdb1_lock_flags flags)
+		enum tdb_lock_flags flags)
 {
 	int ret;
 
-	if (tdb->flags & TDB1_NOLOCK) {
+	if (tdb->flags & TDB_NOLOCK) {
 		return 0;
 	}
 
@@ -145,7 +145,7 @@ int tdb1_brlock(struct tdb1_context *tdb,
 
 	do {
 		ret = fcntl_lock(tdb, rw_type, offset, len,
-				 flags & TDB1_LOCK_WAIT);
+				 flags & TDB_LOCK_WAIT);
 	} while (ret == -1 && errno == EINTR);
 
 	if (ret == -1) {
@@ -153,10 +153,10 @@ int tdb1_brlock(struct tdb1_context *tdb,
 		/* Generic lock error. errno set by fcntl.
 		 * EAGAIN is an expected return from non-blocking
 		 * locks. */
-		if (!(flags & TDB1_LOCK_PROBE) && errno != EAGAIN) {
+		if (!(flags & TDB_LOCK_PROBE) && errno != EAGAIN) {
 			tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
 				   "tdb1_brlock failed (fd=%d) at offset %d rw_type=%d flags=%d len=%d",
-				   tdb->fd, offset, rw_type, flags, (int)len);
+				   tdb->file->fd, offset, rw_type, flags, (int)len);
 		}
 		return -1;
 	}
@@ -168,7 +168,7 @@ int tdb1_brunlock(struct tdb1_context *tdb,
 {
 	int ret;
 
-	if (tdb->flags & TDB1_NOLOCK) {
+	if (tdb->flags & TDB_NOLOCK) {
 		return 0;
 	}
 
@@ -180,7 +180,7 @@ int tdb1_brunlock(struct tdb1_context *tdb,
 		tdb->last_error = tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
 					     "tdb1_brunlock failed (fd=%d) at offset"
 					     " %d rw_type=%d len=%d",
-					     tdb->fd, offset, rw_type, (int)len);
+					     tdb->file->fd, offset, rw_type, (int)len);
 	}
 	return ret;
 }
@@ -195,15 +195,15 @@ int tdb1_allrecord_upgrade(struct tdb1_context *tdb)
 {
 	int count = 1000;
 
-	if (tdb->allrecord_lock.count != 1) {
+	if (tdb->file->allrecord_lock.count != 1) {
 		tdb->last_error = tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
 					     "tdb1_allrecord_upgrade failed: "
 					     "count %u too high",
-					     tdb->allrecord_lock.count);
+					     tdb->file->allrecord_lock.count);
 		return -1;
 	}
 
-	if (tdb->allrecord_lock.off != 1) {
+	if (tdb->file->allrecord_lock.off != 1) {
 		tdb->last_error = tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
 					     "tdb1_allrecord_upgrade failed:"
 					     " already upgraded?");
@@ -213,9 +213,9 @@ int tdb1_allrecord_upgrade(struct tdb1_context *tdb)
 	while (count--) {
 		struct timeval tv;
 		if (tdb1_brlock(tdb, F_WRLCK, TDB1_FREELIST_TOP, 0,
-				TDB1_LOCK_WAIT|TDB1_LOCK_PROBE) == 0) {
-			tdb->allrecord_lock.ltype = F_WRLCK;
-			tdb->allrecord_lock.off = 0;
+				TDB_LOCK_WAIT|TDB_LOCK_PROBE) == 0) {
+			tdb->file->allrecord_lock.ltype = F_WRLCK;
+			tdb->file->allrecord_lock.off = 0;
 			return 0;
 		}
 		if (errno != EDEADLK) {
@@ -231,14 +231,14 @@ int tdb1_allrecord_upgrade(struct tdb1_context *tdb)
 	return -1;
 }
 
-static struct tdb1_lock_type *tdb1_find_nestlock(struct tdb1_context *tdb,
-						 tdb1_off_t offset)
+static struct tdb_lock *tdb1_find_nestlock(struct tdb1_context *tdb,
+					   tdb1_off_t offset)
 {
 	unsigned int i;
 
-	for (i=0; i<tdb->num_lockrecs; i++) {
-		if (tdb->lockrecs[i].off == offset) {
-			return &tdb->lockrecs[i];
+	for (i=0; i<tdb->file->num_lockrecs; i++) {
+		if (tdb->file->lockrecs[i].off == offset) {
+			return &tdb->file->lockrecs[i];
 		}
 	}
 	return NULL;
@@ -246,9 +246,9 @@ static struct tdb1_lock_type *tdb1_find_nestlock(struct tdb1_context *tdb,
 
 /* lock an offset in the database. */
 int tdb1_nest_lock(struct tdb1_context *tdb, uint32_t offset, int ltype,
-		   enum tdb1_lock_flags flags)
+		   enum tdb_lock_flags flags)
 {
-	struct tdb1_lock_type *new_lck;
+	struct tdb_lock *new_lck;
 
 	if (offset >= lock_offset(tdb->header.hash_size)) {
 		tdb->last_error = tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
@@ -257,7 +257,7 @@ int tdb1_nest_lock(struct tdb1_context *tdb, uint32_t offset, int ltype,
 					     offset, ltype);
 		return -1;
 	}
-	if (tdb->flags & TDB1_NOLOCK)
+	if (tdb->flags & TDB_NOLOCK)
 		return 0;
 
 	new_lck = tdb1_find_nestlock(tdb, offset);
@@ -270,14 +270,14 @@ int tdb1_nest_lock(struct tdb1_context *tdb, uint32_t offset, int ltype,
 		return 0;
 	}
 
-	new_lck = (struct tdb1_lock_type *)realloc(
-		tdb->lockrecs,
-		sizeof(*tdb->lockrecs) * (tdb->num_lockrecs+1));
+	new_lck = (struct tdb_lock *)realloc(
+		tdb->file->lockrecs,
+		sizeof(*tdb->file->lockrecs) * (tdb->file->num_lockrecs+1));
 	if (new_lck == NULL) {
 		errno = ENOMEM;
 		return -1;
 	}
-	tdb->lockrecs = new_lck;
+	tdb->file->lockrecs = new_lck;
 
 	/* Since fcntl locks don't nest, we do a lock for the first one,
 	   and simply bump the count for future ones */
@@ -285,10 +285,10 @@ int tdb1_nest_lock(struct tdb1_context *tdb, uint32_t offset, int ltype,
 		return -1;
 	}
 
-	tdb->lockrecs[tdb->num_lockrecs].off = offset;
-	tdb->lockrecs[tdb->num_lockrecs].count = 1;
-	tdb->lockrecs[tdb->num_lockrecs].ltype = ltype;
-	tdb->num_lockrecs++;
+	tdb->file->lockrecs[tdb->file->num_lockrecs].off = offset;
+	tdb->file->lockrecs[tdb->file->num_lockrecs].count = 1;
+	tdb->file->lockrecs[tdb->file->num_lockrecs].ltype = ltype;
+	tdb->file->num_lockrecs++;
 
 	return 0;
 }
@@ -298,11 +298,11 @@ static int tdb1_lock_and_recover(struct tdb1_context *tdb)
 	int ret;
 
 	/* We need to match locking order in transaction commit. */
-	if (tdb1_brlock(tdb, F_WRLCK, TDB1_FREELIST_TOP, 0, TDB1_LOCK_WAIT)) {
+	if (tdb1_brlock(tdb, F_WRLCK, TDB1_FREELIST_TOP, 0, TDB_LOCK_WAIT)) {
 		return -1;
 	}
 
-	if (tdb1_brlock(tdb, F_WRLCK, TDB1_OPEN_LOCK, 1, TDB1_LOCK_WAIT)) {
+	if (tdb1_brlock(tdb, F_WRLCK, TDB1_OPEN_LOCK, 1, TDB_LOCK_WAIT)) {
 		tdb1_brunlock(tdb, F_WRLCK, TDB1_FREELIST_TOP, 0);
 		return -1;
 	}
@@ -319,26 +319,26 @@ static bool have_data_locks(const struct tdb1_context *tdb)
 {
 	unsigned int i;
 
-	for (i = 0; i < tdb->num_lockrecs; i++) {
-		if (tdb->lockrecs[i].off >= lock_offset(-1))
+	for (i = 0; i < tdb->file->num_lockrecs; i++) {
+		if (tdb->file->lockrecs[i].off >= lock_offset(-1))
 			return true;
 	}
 	return false;
 }
 
 static int tdb1_lock_list(struct tdb1_context *tdb, int list, int ltype,
-			  enum tdb1_lock_flags waitflag)
+			  enum tdb_lock_flags waitflag)
 {
 	int ret;
 	bool check = false;
 
 	/* a allrecord lock allows us to avoid per chain locks */
-	if (tdb->allrecord_lock.count &&
-	    (ltype == tdb->allrecord_lock.ltype || ltype == F_RDLCK)) {
+	if (tdb->file->allrecord_lock.count &&
+	    (ltype == tdb->file->allrecord_lock.ltype || ltype == F_RDLCK)) {
 		return 0;
 	}
 
-	if (tdb->allrecord_lock.count) {
+	if (tdb->file->allrecord_lock.count) {
 		tdb->last_error = TDB_ERR_LOCK;
 		ret = -1;
 	} else {
@@ -363,7 +363,7 @@ int tdb1_lock(struct tdb1_context *tdb, int list, int ltype)
 {
 	int ret;
 
-	ret = tdb1_lock_list(tdb, list, ltype, TDB1_LOCK_WAIT);
+	ret = tdb1_lock_list(tdb, list, ltype, TDB_LOCK_WAIT);
 	if (ret) {
 		tdb_logerr(tdb, tdb->last_error, TDB_LOG_ERROR,
 			   "tdb1_lock failed on list %d "
@@ -375,9 +375,9 @@ int tdb1_lock(struct tdb1_context *tdb, int list, int ltype)
 int tdb1_nest_unlock(struct tdb1_context *tdb, uint32_t offset, int ltype)
 {
 	int ret = -1;
-	struct tdb1_lock_type *lck;
+	struct tdb_lock *lck;
 
-	if (tdb->flags & TDB1_NOLOCK)
+	if (tdb->flags & TDB_NOLOCK)
 		return 0;
 
 	/* Sanity checks */
@@ -413,15 +413,15 @@ int tdb1_nest_unlock(struct tdb1_context *tdb, uint32_t offset, int ltype)
 	 * Shrink the array by overwriting the element just unlocked with the
 	 * last array element.
 	 */
-	*lck = tdb->lockrecs[--tdb->num_lockrecs];
+	*lck = tdb->file->lockrecs[--tdb->file->num_lockrecs];
 
 	/*
 	 * We don't bother with realloc when the array shrinks, but if we have
 	 * a completely idle tdb we should get rid of the locked array.
 	 */
 
-	if (tdb->num_lockrecs == 0) {
-		SAFE_FREE(tdb->lockrecs);
+	if (tdb->file->num_lockrecs == 0) {
+		SAFE_FREE(tdb->file->lockrecs);
 	}
 
 	return ret;
@@ -430,12 +430,12 @@ int tdb1_nest_unlock(struct tdb1_context *tdb, uint32_t offset, int ltype)
 int tdb1_unlock(struct tdb1_context *tdb, int list, int ltype)
 {
 	/* a global lock allows us to avoid per chain locks */
-	if (tdb->allrecord_lock.count &&
-	    (ltype == tdb->allrecord_lock.ltype || ltype == F_RDLCK)) {
+	if (tdb->file->allrecord_lock.count &&
+	    (ltype == tdb->file->allrecord_lock.ltype || ltype == F_RDLCK)) {
 		return 0;
 	}
 
-	if (tdb->allrecord_lock.count) {
+	if (tdb->file->allrecord_lock.count) {
 		tdb->last_error = TDB_ERR_LOCK;
 		return -1;
 	}
@@ -447,7 +447,7 @@ int tdb1_unlock(struct tdb1_context *tdb, int list, int ltype)
   get the transaction lock
  */
 int tdb1_transaction_lock(struct tdb1_context *tdb, int ltype,
-			  enum tdb1_lock_flags lockflags)
+			  enum tdb_lock_flags lockflags)
 {
 	return tdb1_nest_lock(tdb, TDB1_TRANSACTION_LOCK, ltype, lockflags);
 }
@@ -462,7 +462,7 @@ int tdb1_transaction_unlock(struct tdb1_context *tdb, int ltype)
 /* Returns 0 if all done, -1 if error, 1 if ok.
  */
 static int tdb1_allrecord_check(struct tdb1_context *tdb, int ltype,
-				enum tdb1_lock_flags flags, bool upgradable)
+				enum tdb_lock_flags flags, bool upgradable)
 {
 	/* There are no locks on read-only dbs */
 	if (tdb->read_only || tdb->traverse_read) {
@@ -470,12 +470,12 @@ static int tdb1_allrecord_check(struct tdb1_context *tdb, int ltype,
 		return -1;
 	}
 
-	if (tdb->allrecord_lock.count && tdb->allrecord_lock.ltype == ltype) {
-		tdb->allrecord_lock.count++;
+	if (tdb->file->allrecord_lock.count && tdb->file->allrecord_lock.ltype == ltype) {
+		tdb->file->allrecord_lock.count++;
 		return 0;
 	}
 
-	if (tdb->allrecord_lock.count) {
+	if (tdb->file->allrecord_lock.count) {
 		/* a global lock of a different type exists */
 		tdb->last_error = TDB_ERR_LOCK;
 		return -1;
@@ -498,11 +498,11 @@ static int tdb1_allrecord_check(struct tdb1_context *tdb, int ltype,
 /* We only need to lock individual bytes, but Linux merges consecutive locks
  * so we lock in contiguous ranges. */
 static int tdb1_chainlock_gradual(struct tdb1_context *tdb,
-				  int ltype, enum tdb1_lock_flags flags,
+				  int ltype, enum tdb_lock_flags flags,
 				  size_t off, size_t len)
 {
 	int ret;
-	enum tdb1_lock_flags nb_flags = (flags & ~TDB1_LOCK_WAIT);
+	enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);
 
 	if (len <= 4) {
 		/* Single record. Just do blocking lock. */
@@ -533,7 +533,7 @@ static int tdb1_chainlock_gradual(struct tdb1_context *tdb,
  * other way of guaranteeing exclusivity (ie. transaction write lock).
 * We do the locking gradually to avoid being starved by smaller locks. */
 int tdb1_allrecord_lock(struct tdb1_context *tdb, int ltype,
-			enum tdb1_lock_flags flags, bool upgradable)
+			enum tdb_lock_flags flags, bool upgradable)
 {
 	switch (tdb1_allrecord_check(tdb, ltype, flags, upgradable)) {
 	case -1:
@@ -562,11 +562,11 @@ int tdb1_allrecord_lock(struct tdb1_context *tdb, int ltype,
 		return -1;
 	}
 
-	tdb->allrecord_lock.count = 1;
+	tdb->file->allrecord_lock.count = 1;
 	/* If it's upgradable, it's actually exclusive so we can treat
 	 * it as a write lock. */
-	tdb->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
-	tdb->allrecord_lock.off = upgradable;
+	tdb->file->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
+	tdb->file->allrecord_lock.off = upgradable;
 
 	if (tdb1_needs_recovery(tdb)) {
 		tdb1_allrecord_unlock(tdb, ltype);
@@ -590,20 +590,20 @@ int tdb1_allrecord_unlock(struct tdb1_context *tdb, int ltype)
 		return -1;
 	}
 
-	if (tdb->allrecord_lock.count == 0) {
+	if (tdb->file->allrecord_lock.count == 0) {
 		tdb->last_error = TDB_ERR_LOCK;
 		return -1;
 	}
 
 	/* Upgradable locks are marked as write locks. */
-	if (tdb->allrecord_lock.ltype != ltype
-	    && (!tdb->allrecord_lock.off || ltype != F_RDLCK)) {
+	if (tdb->file->allrecord_lock.ltype != ltype
+	    && (!tdb->file->allrecord_lock.off || ltype != F_RDLCK)) {
 		tdb->last_error = TDB_ERR_LOCK;
 		return -1;
 	}
 
-	if (tdb->allrecord_lock.count > 1) {
-		tdb->allrecord_lock.count--;
+	if (tdb->file->allrecord_lock.count > 1) {
+		tdb->file->allrecord_lock.count--;
 		return 0;
 	}
 
@@ -613,8 +613,8 @@ int tdb1_allrecord_unlock(struct tdb1_context *tdb, int ltype)
 		return -1;
 	}
 
-	tdb->allrecord_lock.count = 0;
-	tdb->allrecord_lock.ltype = 0;
+	tdb->file->allrecord_lock.count = 0;
+	tdb->file->allrecord_lock.ltype = 0;
 
 	return 0;
 }
@@ -622,7 +622,7 @@ int tdb1_allrecord_unlock(struct tdb1_context *tdb, int ltype)
 /* lock entire database with write lock */
 int tdb1_lockall(struct tdb1_context *tdb)
 {
-	return tdb1_allrecord_lock(tdb, F_WRLCK, TDB1_LOCK_WAIT, false);
+	return tdb1_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
 }
 
 /* unlock entire database with write lock */
@@ -634,7 +634,7 @@ int tdb1_unlockall(struct tdb1_context *tdb)
 /* lock entire database with read lock */
 int tdb1_lockall_read(struct tdb1_context *tdb)
 {
-	return tdb1_allrecord_lock(tdb, F_RDLCK, TDB1_LOCK_WAIT, false);
+	return tdb1_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
 }
 
 /* unlock entire database with read lock */
@@ -645,25 +645,25 @@ int tdb1_unlockall_read(struct tdb1_context *tdb)
 
 /* lock/unlock one hash chain. This is meant to be used to reduce
    contention - it cannot guarantee how many records will be locked */
-int tdb1_chainlock(struct tdb1_context *tdb, TDB1_DATA key)
+int tdb1_chainlock(struct tdb1_context *tdb, TDB_DATA key)
 {
 	int ret = tdb1_lock(tdb, TDB1_BUCKET(tdb->hash_fn(&key)), F_WRLCK);
 	return ret;
 }
 
-int tdb1_chainunlock(struct tdb1_context *tdb, TDB1_DATA key)
+int tdb1_chainunlock(struct tdb1_context *tdb, TDB_DATA key)
 {
 	return tdb1_unlock(tdb, TDB1_BUCKET(tdb->hash_fn(&key)), F_WRLCK);
 }
 
-int tdb1_chainlock_read(struct tdb1_context *tdb, TDB1_DATA key)
+int tdb1_chainlock_read(struct tdb1_context *tdb, TDB_DATA key)
 {
 	int ret;
 	ret = tdb1_lock(tdb, TDB1_BUCKET(tdb->hash_fn(&key)), F_RDLCK);
 	return ret;
 }
 
-int tdb1_chainunlock_read(struct tdb1_context *tdb, TDB1_DATA key)
+int tdb1_chainunlock_read(struct tdb1_context *tdb, TDB_DATA key)
 {
 	return tdb1_unlock(tdb, TDB1_BUCKET(tdb->hash_fn(&key)), F_RDLCK);
 }
@@ -671,10 +671,10 @@ int tdb1_chainunlock_read(struct tdb1_context *tdb, TDB1_DATA key)
 /* record lock stops delete underneath */
 int tdb1_lock_record(struct tdb1_context *tdb, tdb1_off_t off)
 {
-	if (tdb->allrecord_lock.count) {
+	if (tdb->file->allrecord_lock.count) {
 		return 0;
 	}
-	return off ? tdb1_brlock(tdb, F_RDLCK, off, 1, TDB1_LOCK_WAIT) : 0;
+	return off ? tdb1_brlock(tdb, F_RDLCK, off, 1, TDB_LOCK_WAIT) : 0;
 }
 
 /*
@@ -688,18 +688,18 @@ int tdb1_write_lock_record(struct tdb1_context *tdb, tdb1_off_t off)
 	for (i = &tdb->travlocks; i; i = i->next)
 		if (i->off == off)
 			return -1;
-	if (tdb->allrecord_lock.count) {
-		if (tdb->allrecord_lock.ltype == F_WRLCK) {
+	if (tdb->file->allrecord_lock.count) {
+		if (tdb->file->allrecord_lock.ltype == F_WRLCK) {
 			return 0;
 		}
 		return -1;
 	}
-	return tdb1_brlock(tdb, F_WRLCK, off, 1, TDB1_LOCK_NOWAIT|TDB1_LOCK_PROBE);
+	return tdb1_brlock(tdb, F_WRLCK, off, 1, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
 }
 
 int tdb1_write_unlock_record(struct tdb1_context *tdb, tdb1_off_t off)
 {
-	if (tdb->allrecord_lock.count) {
+	if (tdb->file->allrecord_lock.count) {
 		return 0;
 	}
 	return tdb1_brunlock(tdb, F_WRLCK, off, 1);
@@ -711,7 +711,7 @@ int tdb1_unlock_record(struct tdb1_context *tdb, tdb1_off_t off)
 	struct tdb1_traverse_lock *i;
 	uint32_t count = 0;
 
-	if (tdb->allrecord_lock.count) {
+	if (tdb->file->allrecord_lock.count) {
 		return 0;
 	}
 
@@ -725,10 +725,10 @@ int tdb1_unlock_record(struct tdb1_context *tdb, tdb1_off_t off)
 
 bool tdb1_have_extra_locks(struct tdb1_context *tdb)
 {
-	unsigned int extra = tdb->num_lockrecs;
+	unsigned int extra = tdb->file->num_lockrecs;
 
 	/* A transaction holds the lock for all records. */
-	if (!tdb->transaction && tdb->allrecord_lock.count) {
+	if (!tdb->transaction && tdb->file->allrecord_lock.count) {
 		return true;
 	}
 
@@ -751,23 +751,23 @@ void tdb1_release_transaction_locks(struct tdb1_context *tdb)
 {
 	unsigned int i, active = 0;
 
-	if (tdb->allrecord_lock.count != 0) {
-		tdb1_brunlock(tdb, tdb->allrecord_lock.ltype, TDB1_FREELIST_TOP, 0);
-		tdb->allrecord_lock.count = 0;
+	if (tdb->file->allrecord_lock.count != 0) {
+		tdb1_brunlock(tdb, tdb->file->allrecord_lock.ltype, TDB1_FREELIST_TOP, 0);
+		tdb->file->allrecord_lock.count = 0;
 	}
 
-	for (i=0;i<tdb->num_lockrecs;i++) {
-		struct tdb1_lock_type *lck = &tdb->lockrecs[i];
+	for (i=0;i<tdb->file->num_lockrecs;i++) {
+		struct tdb_lock *lck = &tdb->file->lockrecs[i];
 
 		/* Don't release the active lock! Copy it to first entry. */
 		if (lck->off == TDB1_ACTIVE_LOCK) {
-			tdb->lockrecs[active++] = *lck;
+			tdb->file->lockrecs[active++] = *lck;
 		} else {
 			tdb1_brunlock(tdb, lck->ltype, lck->off, 1);
 		}
 	}
-	tdb->num_lockrecs = active;
-	if (tdb->num_lockrecs == 0) {
-		SAFE_FREE(tdb->lockrecs);
+	tdb->file->num_lockrecs = active;
+	if (tdb->file->num_lockrecs == 0) {
+		SAFE_FREE(tdb->file->lockrecs);
 	}
 }
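
The change above is mechanical: the per-file locking state (the file descriptor, lockrecs, num_lockrecs and allrecord_lock) moves from struct tdb1_context onto the shared tdb->file, and the tdb1-private lock types and flags (struct tdb1_lock_type, enum tdb1_lock_flags, TDB1_NOLOCK, TDB1_LOCK_WAIT/NOWAIT/PROBE) are replaced by their shared tdb2 counterparts (struct tdb_lock, enum tdb_lock_flags, TDB_NOLOCK, TDB_LOCK_*). The one piece of logic worth spelling out is the counting done by tdb1_nest_lock()/tdb1_nest_unlock(): as the in-tree comment says, fcntl() byte-range locks do not nest within a process (locking the same range twice just merges, and one unlock drops it), so the code takes the kernel lock only for the first holder of an offset and afterwards just bumps a counter. The sketch below shows that pattern in isolation; it is illustrative only, not part of the patch, and every name in it (struct lock_owner, nest_lock_sketch() and so on) is made up for the example rather than taken from tdb.

/*
 * Illustrative sketch only -- not part of the patch above.  It mimics the
 * counting scheme used by tdb1_nest_lock()/tdb1_nest_unlock(): the lock
 * owner keeps a per-offset count and only calls fcntl() for the first
 * lock and the last unlock of each offset.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

struct lock_rec {
	off_t off;		/* byte offset being locked */
	int ltype;		/* F_RDLCK or F_WRLCK */
	unsigned int count;	/* how often this offset was "locked" */
};

struct lock_owner {
	int fd;			/* shared descriptor, like tdb->file->fd */
	struct lock_rec *recs;	/* like tdb->file->lockrecs */
	unsigned int num_recs;	/* like tdb->file->num_lockrecs */
};

static int fcntl_lock_range(int fd, int ltype, off_t off, off_t len,
			    int waitflag)
{
	struct flock fl = {
		.l_type = ltype,
		.l_whence = SEEK_SET,
		.l_start = off,
		.l_len = len,
	};
	return fcntl(fd, waitflag ? F_SETLKW : F_SETLK, &fl);
}

/* Take a one-byte lock at @off, or bump the count if we already hold it. */
static int nest_lock_sketch(struct lock_owner *owner, off_t off, int ltype)
{
	struct lock_rec *recs;
	unsigned int i;

	for (i = 0; i < owner->num_recs; i++) {
		if (owner->recs[i].off == off) {
			owner->recs[i].count++;
			return 0;
		}
	}

	recs = realloc(owner->recs, sizeof(*recs) * (owner->num_recs + 1));
	if (!recs) {
		errno = ENOMEM;
		return -1;
	}
	owner->recs = recs;

	/* Only the first holder of this offset talks to the kernel. */
	if (fcntl_lock_range(owner->fd, ltype, off, 1, 1) == -1)
		return -1;

	owner->recs[owner->num_recs].off = off;
	owner->recs[owner->num_recs].ltype = ltype;
	owner->recs[owner->num_recs].count = 1;
	owner->num_recs++;
	return 0;
}

/* Drop one reference; fcntl() is only called when the count reaches zero. */
static int nest_unlock_sketch(struct lock_owner *owner, off_t off)
{
	unsigned int i;

	for (i = 0; i < owner->num_recs; i++) {
		if (owner->recs[i].off != off)
			continue;
		if (--owner->recs[i].count > 0)
			return 0;
		if (fcntl_lock_range(owner->fd, F_UNLCK, off, 1, 1) == -1)
			return -1;
		/* Shrink by moving the last element into the hole. */
		owner->recs[i] = owner->recs[--owner->num_recs];
		return 0;
	}
	errno = EINVAL;
	return -1;
}

A caller would pair these the way tdb1_lock() and tdb1_unlock() pair the real functions: every successful nest_lock_sketch() is matched by one nest_unlock_sketch() on the same offset, and only the outermost pair results in an fcntl() call.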