X-Git-Url: http://git.ozlabs.org/?a=blobdiff_plain;f=ccan%2Ftdb2%2Flock.c;h=4033d0afe813e59bf4dee4b8bcc9e5d34757c298;hb=5c4a21ab03a428373e7659b9606facf85dcbe17b;hp=82fcdf17d1e6dfd0126d17232826cc53391329ee;hpb=2960e90f2d036935273d163593839d3777be7980;p=ccan

diff --git a/ccan/tdb2/lock.c b/ccan/tdb2/lock.c
index 82fcdf17..4033d0af 100644
--- a/ccan/tdb2/lock.c
+++ b/ccan/tdb2/lock.c
@@ -30,39 +30,89 @@
 #include 
 
 /* If we were threaded, we could wait for unlock, but we're not, so fail. */
-static bool owner_conflict(struct tdb_context *tdb, struct tdb_lock *lock)
+static enum TDB_ERROR owner_conflict(struct tdb_context *tdb, const char *call)
 {
-	if (lock->owner != tdb) {
-		tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
-			   "Lock already owned by another opener");
+	return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
+			  "%s: lock owned by another tdb in this process.",
+			  call);
+}
+
+/* If we fork, we no longer really own locks: preserves errno */
+static bool check_lock_pid(struct tdb_context *tdb,
+			   const char *call, bool log)
+{
+	/* No locks? No problem! */
+	if (tdb->file->allrecord_lock.count == 0
+	    && tdb->file->num_lockrecs == 0) {
 		return true;
 	}
+
+	/* No fork? No problem! */
+	if (tdb->file->locker == getpid()) {
+		return true;
+	}
+
+	if (log) {
+		tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
+			   "%s: fork() detected after lock acquisition!"
+			   " (%u vs %u)", call, tdb->file->locker, getpid());
+	}
 	return false;
 }
 
-static int fcntl_lock(struct tdb_context *tdb,
-		      int rw, off_t off, off_t len, bool waitflag)
+int tdb_fcntl_lock(int fd, int rw, off_t off, off_t len, bool waitflag,
+		   void *unused)
 {
 	struct flock fl;
+	int ret;
 
-	fl.l_type = rw;
-	fl.l_whence = SEEK_SET;
-	fl.l_start = off;
-	fl.l_len = len;
-	fl.l_pid = 0;
+	do {
+		fl.l_type = rw;
+		fl.l_whence = SEEK_SET;
+		fl.l_start = off;
+		fl.l_len = len;
 
-	add_stat(tdb, lock_lowlevel, 1);
-	if (waitflag)
-		return fcntl(tdb->file->fd, F_SETLKW, &fl);
-	else {
-		add_stat(tdb, lock_nonblock, 1);
-		return fcntl(tdb->file->fd, F_SETLK, &fl);
-	}
+		if (waitflag)
+			ret = fcntl(fd, F_SETLKW, &fl);
+		else
+			ret = fcntl(fd, F_SETLK, &fl);
+	} while (ret != 0 && errno == EINTR);
+	return ret;
 }
 
-static int fcntl_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
+int tdb_fcntl_unlock(int fd, int rw, off_t off, off_t len, void *unused)
 {
 	struct flock fl;
+	int ret;
+
+	do {
+		fl.l_type = F_UNLCK;
+		fl.l_whence = SEEK_SET;
+		fl.l_start = off;
+		fl.l_len = len;
+
+		ret = fcntl(fd, F_SETLKW, &fl);
+	} while (ret != 0 && errno == EINTR);
+	return ret;
+}
+
+static int lock(struct tdb_context *tdb,
+		int rw, off_t off, off_t len, bool waitflag)
+{
+	if (tdb->file->allrecord_lock.count == 0
+	    && tdb->file->num_lockrecs == 0) {
+		tdb->file->locker = getpid();
+	}
+
+	tdb->stats.lock_lowlevel++;
+	if (!waitflag)
+		tdb->stats.lock_nonblock++;
+	return tdb->lock_fn(tdb->file->fd, rw, off, len, waitflag,
+			    tdb->lock_data);
+}
+
+static int unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
+{
 #if 0 /* Check they matched up locks and unlocks correctly. */
 	char line[80];
 	FILE *locks;
@@ -121,13 +171,7 @@ static int fcntl_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
 	fclose(locks);
 #endif
 
-	fl.l_type = F_UNLCK;
-	fl.l_whence = SEEK_SET;
-	fl.l_start = off;
-	fl.l_len = len;
-	fl.l_pid = 0;
-
-	return fcntl(tdb->file->fd, F_SETLKW, &fl);
+	return tdb->unlock_fn(tdb->file->fd, rw, off, len, tdb->lock_data);
 }
 
 /* a byte range locking function - return 0 on success
@@ -158,16 +202,13 @@ static enum TDB_ERROR tdb_brlock(struct tdb_context *tdb,
 				  (long long)(offset + len));
 	}
 
-	do {
-		ret = fcntl_lock(tdb, rw_type, offset, len,
-				 flags & TDB_LOCK_WAIT);
-	} while (ret == -1 && errno == EINTR);
-
-	if (ret == -1) {
+	ret = lock(tdb, rw_type, offset, len, flags & TDB_LOCK_WAIT);
+	if (ret != 0) {
 		/* Generic lock error. errno set by fcntl.
 		 * EAGAIN is an expected return from non-blocking
 		 * locks. */
-		if (!(flags & TDB_LOCK_PROBE) && errno != EAGAIN) {
+		if (!(flags & TDB_LOCK_PROBE)
+		    && (errno != EAGAIN && errno != EINTR)) {
 			tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
 				   "tdb_brlock failed (fd=%d) at"
 				   " offset %zu rw_type=%d flags=%d len=%zu:"
@@ -189,16 +230,15 @@ static enum TDB_ERROR tdb_brunlock(struct tdb_context *tdb,
 		return TDB_SUCCESS;
 	}
 
-	do {
-		ret = fcntl_unlock(tdb, rw_type, offset, len);
-	} while (ret == -1 && errno == EINTR);
+	ret = unlock(tdb, rw_type, offset, len);
 
-	if (ret == -1) {
+	/* If we fail, *then* we verify that we owned the lock. If not, ok. */
+	if (ret == -1 && check_lock_pid(tdb, "tdb_brunlock", false)) {
 		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
 				  "tdb_brunlock failed (fd=%d) at offset %zu"
-				  " rw_type=%d len=%zu",
+				  " rw_type=%d len=%zu: %s",
 				  tdb->file->fd, (size_t)offset, rw_type,
-				  (size_t)len);
+				  (size_t)len, strerror(errno));
 	}
 	return TDB_SUCCESS;
 }
@@ -213,6 +253,9 @@ enum TDB_ERROR tdb_allrecord_upgrade(struct tdb_context *tdb)
 {
 	int count = 1000;
 
+	if (!check_lock_pid(tdb, "tdb_transaction_prepare_commit", true))
+		return TDB_ERR_LOCK;
+
 	if (tdb->file->allrecord_lock.count != 1) {
 		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
 				  "tdb_allrecord_upgrade failed:"
@@ -226,6 +269,10 @@ enum TDB_ERROR tdb_allrecord_upgrade(struct tdb_context *tdb)
 				  " already upgraded?");
 	}
 
+	if (tdb->file->allrecord_lock.owner != tdb) {
+		return owner_conflict(tdb, "tdb_allrecord_upgrade");
+	}
+
 	while (count--) {
 		struct timeval tv;
 		if (tdb_brlock(tdb, F_WRLCK,
@@ -243,8 +290,11 @@ enum TDB_ERROR tdb_allrecord_upgrade(struct tdb_context *tdb)
 		tv.tv_usec = 1;
 		select(0, NULL, NULL, NULL, &tv);
 	}
-	return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
-			  "tdb_allrecord_upgrade failed");
+
+	if (errno != EAGAIN && errno != EINTR)
+		tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+			   "tdb_allrecord_upgrade failed");
+	return TDB_ERR_LOCK;
 }
 
 static struct tdb_lock *find_nestlock(struct tdb_context *tdb, tdb_off_t offset,
@@ -266,6 +316,9 @@ enum TDB_ERROR tdb_lock_and_recover(struct tdb_context *tdb)
 {
 	enum TDB_ERROR ecode;
 
+	if (!check_lock_pid(tdb, "tdb_transaction_prepare_commit", true))
+		return TDB_ERR_LOCK;
+
 	ecode = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK,
 				   false);
 	if (ecode != TDB_SUCCESS) {
@@ -302,12 +355,17 @@ static enum TDB_ERROR tdb_nest_lock(struct tdb_context *tdb,
 	if (tdb->flags & TDB_NOLOCK)
 		return TDB_SUCCESS;
 
-	add_stat(tdb, locks, 1);
+	if (!check_lock_pid(tdb, "tdb_nest_lock", true)) {
+		return TDB_ERR_LOCK;
+	}
+
+	tdb->stats.locks++;
 
 	new_lck = find_nestlock(tdb, offset, NULL);
 	if (new_lck) {
-		if (owner_conflict(tdb, new_lck))
-			return -1;
+		if (new_lck->owner != tdb) {
+			return owner_conflict(tdb, "tdb_nest_lock");
+		}
 
 		if (new_lck->ltype == F_RDLCK && ltype == F_WRLCK) {
 			return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
@@ -320,12 +378,14 @@ static enum TDB_ERROR tdb_nest_lock(struct tdb_context *tdb,
 		return TDB_SUCCESS;
 	}
 
+#if 0
 	if (tdb->file->num_lockrecs
 	    && offset >= TDB_HASH_LOCK_START
 	    && offset < TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE) {
 		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
 				  "tdb_nest_lock: already have a hash lock?");
 	}
+#endif
 
 	new_lck = (struct tdb_lock *)realloc(
 		tdb->file->lockrecs,
@@ -470,14 +530,24 @@ enum TDB_ERROR tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
 	enum TDB_ERROR ecode;
 	tdb_bool_err berr;
 
-	if (tdb->file->allrecord_lock.count
-	    && (ltype == F_RDLCK
-		|| tdb->file->allrecord_lock.ltype == F_WRLCK)) {
-		tdb->file->allrecord_lock.count++;
+	if (tdb->flags & TDB_NOLOCK)
 		return TDB_SUCCESS;
+
+	if (!check_lock_pid(tdb, "tdb_allrecord_lock", true)) {
+		return TDB_ERR_LOCK;
 	}
 
 	if (tdb->file->allrecord_lock.count) {
+		if (tdb->file->allrecord_lock.owner != tdb) {
+			return owner_conflict(tdb, "tdb_allrecord_lock");
+		}
+
+		if (ltype == F_RDLCK
+		    || tdb->file->allrecord_lock.ltype == F_WRLCK) {
+			tdb->file->allrecord_lock.count++;
+			return TDB_SUCCESS;
+		}
+
 		/* a global lock of a different type exists */
 		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
 				  "tdb_allrecord_lock: already have %s lock",
@@ -499,28 +569,19 @@ enum TDB_ERROR tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
 				  " can't upgrade a write lock");
 	}
 
-	add_stat(tdb, locks, 1);
+	tdb->stats.locks++;
 
 again:
 	/* Lock hashes, gradually. */
 	ecode = tdb_lock_gradual(tdb, ltype, flags, TDB_HASH_LOCK_START,
 				 TDB_HASH_LOCK_RANGE);
-	if (ecode != TDB_SUCCESS) {
-		if (!(flags & TDB_LOCK_PROBE)) {
-			tdb_logerr(tdb, ecode, TDB_LOG_ERROR,
-				   "tdb_allrecord_lock hashes failed");
-		}
+	if (ecode != TDB_SUCCESS)
 		return ecode;
-	}
 
 	/* Lock free tables: there to end of file. */
 	ecode = tdb_brlock(tdb, ltype, TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE,
 			   0, flags);
 	if (ecode != TDB_SUCCESS) {
-		if (!(flags & TDB_LOCK_PROBE)) {
-			tdb_logerr(tdb, ecode, TDB_LOG_ERROR,
-				   "tdb_allrecord_lock freetables failed");
-		}
 		tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START,
 			     TDB_HASH_LOCK_RANGE);
 		return ecode;
@@ -582,6 +643,9 @@ void tdb_unlock_expand(struct tdb_context *tdb, int ltype)
 /* unlock entire db */
 void tdb_allrecord_unlock(struct tdb_context *tdb, int ltype)
 {
+	if (tdb->flags & TDB_NOLOCK)
+		return;
+
 	if (tdb->file->allrecord_lock.count == 0) {
 		tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
 			   "tdb_allrecord_unlock: not locked!");
@@ -654,16 +718,21 @@ enum TDB_ERROR tdb_lock_hashes(struct tdb_context *tdb,
 			       int ltype, enum tdb_lock_flags waitflag)
 {
 	/* FIXME: Do this properly, using hlock_range */
-	unsigned lock = TDB_HASH_LOCK_START
+	unsigned l = TDB_HASH_LOCK_START
 		+ (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));
 
 	/* a allrecord lock allows us to avoid per chain locks */
-	if (tdb->file->allrecord_lock.count &&
-	    (ltype == tdb->file->allrecord_lock.ltype || ltype == F_RDLCK)) {
-		return TDB_SUCCESS;
-	}
-
 	if (tdb->file->allrecord_lock.count) {
+		if (!check_lock_pid(tdb, "tdb_lock_hashes", true))
+			return TDB_ERR_LOCK;
+
+		if (tdb->file->allrecord_lock.owner != tdb)
+			return owner_conflict(tdb, "tdb_lock_hashes");
+		if (ltype == tdb->file->allrecord_lock.ltype
+		    || ltype == F_RDLCK) {
+			return TDB_SUCCESS;
+		}
+
 		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
 				  "tdb_lock_hashes:"
 				  " already have %s allrecordlock",
@@ -682,14 +751,14 @@ enum TDB_ERROR tdb_lock_hashes(struct tdb_context *tdb,
 				  " already have expansion lock");
 	}
 
-	return tdb_nest_lock(tdb, lock, ltype, waitflag);
+	return tdb_nest_lock(tdb, l, ltype, waitflag);
 }
 
 enum TDB_ERROR tdb_unlock_hashes(struct tdb_context *tdb,
 				 tdb_off_t hash_lock,
 				 tdb_len_t hash_range, int ltype)
 {
-	unsigned lock = TDB_HASH_LOCK_START
+	unsigned l = TDB_HASH_LOCK_START
 		+ (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));
 
 	if (tdb->flags & TDB_NOLOCK)
@@ -705,7 +774,7 @@ enum TDB_ERROR tdb_unlock_hashes(struct tdb_context *tdb,
 		return TDB_SUCCESS;
 	}
 
-	return tdb_nest_unlock(tdb, lock, ltype);
+	return tdb_nest_unlock(tdb, l, ltype);
 }
 
 /* Hash locks use TDB_HASH_LOCK_START + the next 30 bits.
@@ -729,6 +798,9 @@ enum TDB_ERROR tdb_lock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off,
 
 	/* a allrecord lock allows us to avoid per chain locks */
 	if (tdb->file->allrecord_lock.count) {
+		if (!check_lock_pid(tdb, "tdb_lock_free_bucket", true))
+			return TDB_ERR_LOCK;
+
 		if (tdb->file->allrecord_lock.ltype == F_WRLCK)
 			return 0;
 		return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
@@ -755,7 +827,27 @@ void tdb_unlock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off)
 	tdb_nest_unlock(tdb, free_lock_off(b_off), F_WRLCK);
 }
 
-void tdb_unlock_all(struct tdb_context *tdb)
+enum TDB_ERROR tdb_lockall(struct tdb_context *tdb)
+{
+	return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
+}
+
+void tdb_unlockall(struct tdb_context *tdb)
+{
+	tdb_allrecord_unlock(tdb, F_WRLCK);
+}
+
+enum TDB_ERROR tdb_lockall_read(struct tdb_context *tdb)
+{
+	return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
+}
+
+void tdb_unlockall_read(struct tdb_context *tdb)
+{
+	tdb_allrecord_unlock(tdb, F_RDLCK);
+}
+
+void tdb_lock_cleanup(struct tdb_context *tdb)
 {
 	unsigned int i;
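
For illustration only (not part of the patch): a minimal sketch of a pair of byte-range lock hooks with the same signatures as the tdb_fcntl_lock()/tdb_fcntl_unlock() defaults introduced above. The patch routes all low-level locking through tdb->lock_fn/tdb->unlock_fn with tdb->lock_data as a private pointer; the names counting_lock, counting_unlock and struct lock_counts below are hypothetical, and the public way to install such hooks is not shown in this diff and is assumed to exist elsewhere in the tree.

#include <stdbool.h>
#include <sys/types.h>

/* Defaults defined in this patch; prototypes repeated here for the sketch. */
int tdb_fcntl_lock(int fd, int rw, off_t off, off_t len, bool waitflag,
		   void *unused);
int tdb_fcntl_unlock(int fd, int rw, off_t off, off_t len, void *unused);

/* Hypothetical private data passed via the hook's void * argument. */
struct lock_counts {
	unsigned long locks;
	unsigned long unlocks;
};

/* Same shape as tdb->lock_fn: count the attempt, then delegate to fcntl. */
static int counting_lock(int fd, int rw, off_t off, off_t len, bool waitflag,
			 void *private_data)
{
	struct lock_counts *c = private_data;

	c->locks++;
	return tdb_fcntl_lock(fd, rw, off, len, waitflag, NULL);
}

/* Same shape as tdb->unlock_fn. */
static int counting_unlock(int fd, int rw, off_t off, off_t len,
			   void *private_data)
{
	struct lock_counts *c = private_data;

	c->unlocks++;
	return tdb_fcntl_unlock(fd, rw, off, len, NULL);
}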