The num_locks field was redundant before this patch series: it mirrored
num_lockrecs exactly. It still does, so remove it.
Also, skip the useless "num_lockrecs > 1" branch when shrinking the lock
array: an unconditional assignment is cheaper anyway.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
tdb->lockrecs[tdb->num_lockrecs].off = offset;
tdb->lockrecs[tdb->num_lockrecs].count = 1;
tdb->lockrecs[tdb->num_lockrecs].ltype = ltype;
tdb->lockrecs[tdb->num_lockrecs].off = offset;
tdb->lockrecs[tdb->num_lockrecs].count = 1;
tdb->lockrecs[tdb->num_lockrecs].ltype = ltype;
- tdb->num_lockrecs += 1;
} else {
ret = tdb->methods->brunlock(tdb, ltype, offset, 1);
}
} else {
ret = tdb->methods->brunlock(tdb, ltype, offset, 1);
}
/*
* Shrink the array by overwriting the element just unlocked with the
* last array element.
*/
/*
* Shrink the array by overwriting the element just unlocked with the
* last array element.
*/
-
- if (tdb->num_lockrecs > 1) {
- *lck = tdb->lockrecs[tdb->num_lockrecs-1];
- }
- tdb->num_lockrecs -= 1;
+ *lck = tdb->lockrecs[--tdb->num_lockrecs];
/*
* We don't bother with realloc when the array shrinks, but if we have
/*
* We don't bother with realloc when the array shrinks, but if we have
tdb_brunlock(tdb, lck->ltype, lck->off, 1);
}
}
tdb_brunlock(tdb, lck->ltype, lck->off, 1);
}
}
- tdb->num_locks = extra;
tdb->num_lockrecs = extra;
if (tdb->num_lockrecs == 0) {
SAFE_FREE(tdb->lockrecs);
tdb->num_lockrecs = extra;
if (tdb->num_lockrecs == 0) {
SAFE_FREE(tdb->lockrecs);
struct tdb_logging_context log;
unsigned int (*hash_fn)(TDB_DATA *key);
int open_flags; /* flags used in the open - needed by reopen */
struct tdb_logging_context log;
unsigned int (*hash_fn)(TDB_DATA *key);
int open_flags; /* flags used in the open - needed by reopen */
- unsigned int num_locks; /* number of chain locks held */
const struct tdb_methods *methods;
struct tdb_transaction *transaction;
int page_size;
const struct tdb_methods *methods;
struct tdb_transaction *transaction;
int page_size;