diff --git a/ccan/tdb2/tdb.c b/ccan/tdb2/tdb.c
index 43d1ef26..d19ceda2 100644
--- a/ccan/tdb2/tdb.c
+++ b/ccan/tdb2/tdb.c
@@ -21,29 +21,35 @@ null_log_fn(struct tdb_context *tdb,
 /* We do a lot of work assuming our copy of the header volatile area
  * is uptodate, and usually it is.  However, once we grab a lock, we have to
  * re-check it. */
-bool update_header(struct tdb_context *tdb)
+bool header_changed(struct tdb_context *tdb)
 {
-	struct tdb_header_volatile pad, *v;
+	uint64_t gen;
 
-	if (tdb->header_uptodate) {
+	if (!(tdb->flags & TDB_NOLOCK) && tdb->header_uptodate) {
 		tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
 			 "warning: header uptodate already\n");
 	}
 
 	/* We could get a partial update if we're not holding any locks. */
-	assert(tdb_has_locks(tdb));
+	assert((tdb->flags & TDB_NOLOCK) || tdb_has_locks(tdb));
 
-	v = tdb_get(tdb, offsetof(struct tdb_header, v), &pad, sizeof(*v));
-	if (!v) {
-		/* On failure, imply we updated header so they retry. */
-		return true;
-	}
 	tdb->header_uptodate = true;
-	if (likely(memcmp(&tdb->header.v, v, sizeof(*v)) == 0)) {
-		return false;
+	gen = tdb_read_off(tdb, offsetof(struct tdb_header, v.generation));
+	if (unlikely(gen != tdb->header.v.generation)) {
+		tdb_read_convert(tdb, offsetof(struct tdb_header, v),
+				 &tdb->header.v, sizeof(tdb->header.v));
+		return true;
 	}
-	tdb->header.v = *v;
-	return true;
+	return false;
+}
+
+int write_header(struct tdb_context *tdb)
+{
+	assert(tdb_read_off(tdb, offsetof(struct tdb_header, v.generation))
+	       == tdb->header.v.generation);
+	tdb->header.v.generation++;
+	return tdb_write_convert(tdb, offsetof(struct tdb_header, v),
+				 &tdb->header.v, sizeof(tdb->header.v));
 }
 
 static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
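
The point of the two helpers above: the volatile part of the header
(the generation count plus the hash parameters) is cached in
tdb->header.v, and the generation count is the cheap way to revalidate
that cache.  A minimal sketch of the intended contract, as a
hypothetical caller (in the patch itself the re-check is done by the
locking paths):

	/* After taking any lock, revalidate the cached header before
	 * trusting hash_bits/hash_off; header_changed() re-reads the
	 * volatile area from disk only if the on-disk generation moved.
	 * write_header() is the writer-side counterpart: bump the
	 * generation and publish the cached copy, under a lock that
	 * excludes everyone else. */
	if (tdb_lock_list(tdb, h, F_RDLCK, TDB_LOCK_WAIT) == TDB_OFF_ERR)
		return TDB_OFF_ERR;
	if (header_changed(tdb)) {
		/* Stale: recompute anything derived from the old copy,
		 * e.g. which bucket a hash maps to. */
	}
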
@@ -207,27 +213,47 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
 	uint64_t hash_test;
 	unsigned v;
 
-	if (!(tdb = (struct tdb_context *)calloc(1, sizeof *tdb))) {
+	tdb = malloc(sizeof(*tdb));
+	if (!tdb) {
 		/* Can't log this */
 		errno = ENOMEM;
 		goto fail;
 	}
-	tdb->fd = -1;
 	tdb->name = NULL;
 	tdb->map_ptr = NULL;
+	tdb->fd = -1;
+	/* map_size will be set below. */
+	tdb->ecode = TDB_SUCCESS;
+	/* header will be read in below. */
+	tdb->header_uptodate = false;
 	tdb->flags = tdb_flags;
 	tdb->log = null_log_fn;
 	tdb->log_priv = NULL;
 	tdb->khash = jenkins_hash;
 	tdb->hash_priv = NULL;
+	tdb->transaction = NULL;
+	/* last_zone will be set below. */
 	tdb_io_init(tdb);
+	tdb_lock_init(tdb);
 
-	/* FIXME */
-	if (attr) {
-		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
-			 "tdb_open: attributes not yet supported\n");
-		errno = EINVAL;
-		goto fail;
+	while (attr) {
+		switch (attr->base.attr) {
+		case TDB_ATTRIBUTE_LOG:
+			tdb->log = attr->log.log_fn;
+			tdb->log_priv = attr->log.log_private;
+			break;
+		case TDB_ATTRIBUTE_HASH:
+			tdb->khash = attr->hash.hash_fn;
+			tdb->hash_priv = attr->hash.hash_private;
+			break;
+		default:
+			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
+				 "tdb_open: unknown attribute type %u\n",
+				 attr->base.attr);
+			errno = EINVAL;
+			goto fail;
+		}
+		attr = attr->base.next;
 	}
 
 	if ((open_flags & O_ACCMODE) == O_WRONLY) {
@@ -238,12 +264,13 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
 	}
 
 	if ((open_flags & O_ACCMODE) == O_RDONLY) {
-		tdb->read_only = 1;
+		tdb->read_only = true;
 		/* read only databases don't do locking */
 		tdb->flags |= TDB_NOLOCK;
-	}
+	} else
+		tdb->read_only = false;
 
-	/* internal databases don't mmap or lock */
+	/* internal databases don't need any of the rest. */
 	if (tdb->flags & TDB_INTERNAL) {
 		tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
 		if (tdb_new_database(tdb) != 0) {
@@ -253,7 +280,9 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
 		}
 		TEST_IT(tdb->flags & TDB_CONVERT);
 		tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
-		goto internal;
+		/* Zones don't matter for internal db. */
+		tdb->last_zone = 0;
+		return tdb;
 	}
 
 	if ((tdb->fd = open(name, open_flags, mode)) == -1) {
@@ -331,15 +360,10 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
 	tdb->map_size = st.st_size;
 	tdb->device = st.st_dev;
 	tdb->inode = st.st_ino;
-	tdb_io_init(tdb);
 	tdb_mmap(tdb);
-
- internal:
-	/* Internal (memory-only) databases skip all the code above to
-	 * do with disk files, and resume here by releasing their
-	 * open lock and hooking into the active list. */
 	tdb_unlock_open(tdb);
-	tdb->last_zone = random_free_zone(tdb);
+	tdb_zone_init(tdb);
+
 	tdb->next = tdbs;
 	tdbs = tdb;
 	return tdb;
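
The attribute loop replaces the old stub that rejected every attribute
with EINVAL.  Attributes arrive as a singly-linked list of tagged
unions, walked via attr->base.attr and attr->base.next.  A usage
sketch, assuming the union tdb_attribute declaration from tdb2.h at
this point in the tree, and a log callback whose signature is inferred
from the tdb->log call sites in this file:

	static void my_log(struct tdb_context *tdb,
			   enum tdb_debug_level level,
			   void *priv, const char *message)
	{
		fprintf(stderr, "tdb: %s", message);
	}

	union tdb_attribute log_attr;
	log_attr.base.attr = TDB_ATTRIBUTE_LOG;
	log_attr.base.next = NULL;
	log_attr.log.log_fn = my_log;
	log_attr.log.log_private = NULL;

	struct tdb_context *tdb = tdb_open("test.tdb", 0,
					   O_RDWR|O_CREAT, 0600, &log_attr);
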
@@ -370,140 +394,166 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
 	return NULL;
 }
 
-static int tdb_key_compare(TDB_DATA key, TDB_DATA data, void *private_data)
-{
-	return memcmp(data.dptr, key.dptr, data.dsize) == 0;
-}
-
-static void unlock_lists(struct tdb_context *tdb,
-			 uint64_t start, uint64_t end, int ltype)
+static tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
 {
-	do {
-		tdb_unlock_list(tdb, start, ltype);
-		start = (start + ((1ULL << tdb->header.v.hash_bits) - 1))
-			& ((1ULL << tdb->header.v.hash_bits) - 1);
-	} while (start != end);
+	return tdb->header.v.hash_off
+		+ ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
+		   * sizeof(tdb_off_t));
 }
 
-/* FIXME: Return header copy? */
-/* Returns -1 or offset of entry (0 if not found).
- * Locks hash entried from *start to *end (where the entry was found). */
-static tdb_off_t find_bucket_and_lock(struct tdb_context *tdb,
-				      const struct tdb_data *key,
-				      uint64_t hash,
-				      uint64_t *start,
-				      uint64_t *end,
-				      uint64_t *room,
-				      int ltype)
+/* Returns 0 if the entry is a zero (definitely not a match).
+ * Returns a valid entry offset if it's a match.  Fills in rec.
+ * Otherwise returns TDB_OFF_ERR: keep searching. */
+static tdb_off_t entry_matches(struct tdb_context *tdb,
+			       uint64_t list,
+			       uint64_t hash,
+			       const struct tdb_data *key,
+			       struct tdb_used_record *rec)
 {
-	uint64_t hextra;
 	tdb_off_t off;
+	uint64_t keylen;
+	const unsigned char *rkey;
 
-	/* hash_bits might be out of date... */
-again:
-	*start = *end = hash & ((1ULL << tdb->header.v.hash_bits) - 1);
-	hextra = hash >> tdb->header.v.hash_bits;
+	list &= ((1ULL << tdb->header.v.hash_bits) - 1);
 
-	/* FIXME: can we avoid locks for some fast paths? */
-	if (tdb_lock_list(tdb, *end, ltype, TDB_LOCK_WAIT) == -1)
+	off = tdb_read_off(tdb, tdb->header.v.hash_off
+			   + list * sizeof(tdb_off_t));
+	if (off == 0 || off == TDB_OFF_ERR)
+		return off;
+
+#if 0 /* FIXME: Check other bits. */
+	unsigned int bits, bitmask, hoffextra;
+	/* Bottom three bits show how many extra hash bits. */
+	bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1;
+	bitmask = (1 << bits)-1;
+	hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask);
+	uint64_t hextra = hash >> tdb->header.v.hash_bits;
+	if ((hextra & bitmask) != hoffextra)
 		return TDB_OFF_ERR;
+	off &= ~...;
+#endif
 
-	/* We only need to check this for first lock. */
-	if (unlikely(update_header(tdb))) {
-		tdb_unlock_list(tdb, *end, ltype);
-		goto again;
-	}
+	if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
+		return TDB_OFF_ERR;
+
+	/* FIXME: check extra bits in header! */
+	keylen = rec_key_length(rec);
+	if (keylen != key->dsize)
+		return TDB_OFF_ERR;
 
-	while ((off = tdb_read_off(tdb, tdb->header.v.hash_off
-				   + *end * sizeof(tdb_off_t)))
-	       != TDB_OFF_ERR) {
-		struct tdb_used_record pad, *r;
-		uint64_t keylen, next;
+	rkey = tdb_access_read(tdb, off + sizeof(*rec), keylen, false);
+	if (!rkey)
+		return TDB_OFF_ERR;
+	if (memcmp(rkey, key->dptr, keylen) != 0)
+		off = TDB_OFF_ERR;
+	tdb_access_release(tdb, rkey);
+	return off;
+}
 
-		/* Didn't find it? */
-		if (!off)
-			return 0;
+/* FIXME: Optimize? */
+static void unlock_lists(struct tdb_context *tdb,
+			 tdb_off_t list, tdb_len_t num,
+			 int ltype)
+{
+	tdb_off_t i;
 
-#if 0 /* FIXME: Check other bits. */
-		unsigned int bits, bitmask, hoffextra;
-		/* Bottom three bits show how many extra hash bits. */
-		bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1;
-		bitmask = (1 << bits)-1;
-		hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask);
-		if ((hextra & bitmask) != hoffextra)
-			goto lock_next;
-#endif
+	for (i = list; i < list + num; i++)
+		tdb_unlock_list(tdb, i, ltype);
+}
 
-		r = tdb_get(tdb, off, &pad, sizeof(*r));
-		if (!r)
-			goto unlock_err;
+/* FIXME: Optimize? */
+static int lock_lists(struct tdb_context *tdb,
+		      tdb_off_t list, tdb_len_t num,
+		      int ltype)
+{
+	tdb_off_t i;
 
-		if (rec_magic(r) != TDB_MAGIC) {
-			tdb->ecode = TDB_ERR_CORRUPT;
-			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
-				 "find_bucket_and_lock: bad magic 0x%llx"
-				 " at offset %llu!\n",
-				 (long long)rec_magic(r), (long long)off);
-			goto unlock_err;
+	for (i = list; i < list + num; i++) {
+		if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT)
+		    == TDB_OFF_ERR) {
+			unlock_lists(tdb, list, i - list, ltype);
+			return -1;
 		}
 	}
+	return 0;
+}
 
-		/* FIXME: check extra bits in header! */
-		keylen = rec_key_length(r);
-		if (keylen != key->dsize)
-			goto lock_next;
-
-		switch (tdb_parse_data(tdb, *key, off + sizeof(*r), key->dsize,
-				       tdb_key_compare, NULL)) {
-		case 1:
-			/* Match! */
-			*room = rec_data_length(r) + rec_extra_padding(r);
-			return off >> TDB_EXTRA_HASHBITS_NUM;
-		case 0:
-			break;
-		default:
-			goto unlock_err;
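
entry_matches() folds the old per-bucket compare into a tri-state
return, and every caller below (tdb_store, tdb_fetch, tdb_delete)
probes with the same shape.  Consolidated here for reference, this is
the loop those hunks repeat:

	/* Walk the locked run of buckets until an empty bucket (0) or
	 * a genuine match; TDB_OFF_ERR means "keep probing". */
	for (i = start; i < start + num_locks; i++) {
		off = entry_matches(tdb, i, h, &key, &rec);
		if (off == 0 || off != TDB_OFF_ERR)
			break;	/* empty bucket, or a real match */
	}
	if (i == start + num_locks)
		off = 0;	/* exhausted the locked run: not found */
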
+/* We lock hashes up to the next empty offset.  We already hold the
+ * lock on the start bucket, but we may need to release and re-grab
+ * it.  If we fail, we hold no locks at all! */
+static tdb_len_t relock_hash_to_zero(struct tdb_context *tdb,
+				     tdb_off_t start, int ltype)
+{
+	tdb_len_t num, len;
+
+again:
+	num = 1ULL << tdb->header.v.hash_bits;
+	len = tdb_find_zero_off(tdb, hash_off(tdb, start), num - start);
+	if (unlikely(len == num - start)) {
+		/* We hit the end of the hash range.  Drop lock: we have
+		   to lock start of hash first. */
+		tdb_len_t pre_locks;
+
+		tdb_unlock_list(tdb, start, ltype);
+
+		/* Grab something, so header is stable. */
+		if (tdb_lock_list(tdb, 0, ltype, TDB_LOCK_WAIT))
+			return TDB_OFF_ERR;
+		pre_locks = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
+		/* We want to lock the zero entry as well. */
+		pre_locks++;
+		if (lock_lists(tdb, 1, pre_locks - 1, ltype) == -1) {
+			tdb_unlock_list(tdb, 0, ltype);
+			return TDB_OFF_ERR;
 		}
-		lock_next:
-		/* Lock next bucket. */
-		/* FIXME: We can deadlock if this wraps! */
-		next = (*end + 1) & ((1ULL << tdb->header.v.hash_bits) - 1);
-		if (next == *start) {
-			tdb->ecode = TDB_ERR_CORRUPT;
-			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
-				 "find_bucket_and_lock: full hash table!\n");
-			goto unlock_err;
+		/* Now lock later ones. */
+		if (unlikely(lock_lists(tdb, start, len, ltype) == -1)) {
+			unlock_lists(tdb, 0, pre_locks, ltype);
+			return TDB_OFF_ERR;
+		}
+		len += pre_locks;
+	} else {
+		/* We want to lock the zero entry as well. */
+		len++;
+		/* But we already have lock on start. */
+		if (unlikely(lock_lists(tdb, start+1, len-1, ltype) == -1)) {
+			tdb_unlock_list(tdb, start, ltype);
+			return TDB_OFF_ERR;
 		}
-		if (tdb_lock_list(tdb, next, ltype, TDB_LOCK_WAIT) == -1)
-			goto unlock_err;
-		*end = next;
 	}
 
-unlock_err:
-	TEST_IT(*end < *start);
-	unlock_lists(tdb, *start, *end, ltype);
-	return TDB_OFF_ERR;
+	/* Now, did we lose the race, and it's not zero any more? */
+	if (unlikely(tdb_read_off(tdb, hash_off(tdb, start + len - 1)) != 0)) {
+		/* Leave the start locked, as expected. */
+		unlock_lists(tdb, start + 1, len - 1, ltype);
+		goto again;
+	}
+
+	return len;
 }
 
+/* FIXME: modify, don't rewrite! */
 static int update_rec_hdr(struct tdb_context *tdb,
 			  tdb_off_t off,
 			  tdb_len_t keylen,
 			  tdb_len_t datalen,
-			  tdb_len_t room,
+			  struct tdb_used_record *rec,
 			  uint64_t h)
 {
-	struct tdb_used_record rec;
+	uint64_t dataroom = rec_data_length(rec) + rec_extra_padding(rec);
 
-	if (set_header(tdb, &rec, keylen, datalen, room - datalen, h))
+	if (set_header(tdb, rec, keylen, datalen, keylen + dataroom, h))
 		return -1;
 
-	return tdb_write_convert(tdb, off, &rec, sizeof(rec));
+	return tdb_write_convert(tdb, off, rec, sizeof(*rec));
 }
 
 /* If we fail, others will try after us. */
 static void enlarge_hash(struct tdb_context *tdb)
 {
-	tdb_off_t newoff, i;
+	tdb_off_t newoff, oldoff, i;
+	tdb_len_t hlen;
 	uint64_t h, num = 1ULL << tdb->header.v.hash_bits;
 	struct tdb_used_record pad, *r;
 
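
The wrap branch in relock_hash_to_zero() exists to keep a single
global lock order (bucket 0 upward) and so avoid deadlock.  A worked
example with hash_bits = 3 (buckets 0..7): say start = 6, buckets 6,
7, 0 and 1 are occupied and 2 is the first zero.  Holding 6 while
waiting for 0 could deadlock against a process that holds 0..2 and
wants 6, so the code drops the lock on 6, locks bucket 0 first (which
also keeps the header stable), extends through the zero bucket 2,
only then re-locks 6..7, and finally re-checks that the last bucket
is still zero, retrying from scratch if it lost that race.
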
@@ -511,32 +561,36 @@ static void enlarge_hash(struct tdb_context *tdb)
 	if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
 		return;
 
-	if (unlikely(update_header(tdb))) {
-		/* Someone else enlarged for us?  Nothing to do. */
-		if ((1ULL << tdb->header.v.hash_bits) != num)
-			goto unlock;
-	}
+	/* Someone else enlarged for us?  Nothing to do. */
+	if ((1ULL << tdb->header.v.hash_bits) != num)
+		goto unlock;
 
-	newoff = alloc(tdb, 0, num * 2, 0, false);
+	/* Allocate our new array. */
+	hlen = num * sizeof(tdb_off_t) * 2;
+	newoff = alloc(tdb, 0, hlen, 0, false);
 	if (unlikely(newoff == TDB_OFF_ERR))
 		goto unlock;
 	if (unlikely(newoff == 0)) {
-		if (tdb_expand(tdb, 0, num * 2, false) == -1)
+		if (tdb_expand(tdb, 0, hlen, false) == -1)
 			goto unlock;
-		newoff = alloc(tdb, 0, num * 2, 0, false);
+		newoff = alloc(tdb, 0, hlen, 0, false);
 		if (newoff == TDB_OFF_ERR || newoff == 0)
 			goto unlock;
 	}
+	/* Step over record header! */
+	newoff += sizeof(struct tdb_used_record);
+
+	/* Starts all zero. */
+	if (zero_out(tdb, newoff, hlen) == -1)
+		goto unlock;
 
 	/* FIXME: If the space before is empty, we know this is in its ideal
-	 * location.  We can steal a bit from the pointer to avoid rehash. */
-	for (i = tdb_find_nonzero_off(tdb, tdb->header.v.hash_off, num);
+	 * location.  Or steal a bit from the pointer to avoid rehash. */
+	for (i = tdb_find_nonzero_off(tdb, hash_off(tdb, 0), num);
 	     i < num;
-	     i += tdb_find_nonzero_off(tdb, tdb->header.v.hash_off
-				       + i*sizeof(tdb_off_t), num - i)) {
+	     i += tdb_find_nonzero_off(tdb, hash_off(tdb, i), num - i)) {
 		tdb_off_t off;
-		off = tdb_read_off(tdb, tdb->header.v.hash_off
-				   + i*sizeof(tdb_off_t));
+		off = tdb_read_off(tdb, hash_off(tdb, i));
 		if (unlikely(off == TDB_OFF_ERR))
 			goto unlock;
 		if (unlikely(!off)) {
@@ -545,27 +599,32 @@ static void enlarge_hash(struct tdb_context *tdb)
 				 "find_bucket_and_lock: zero hash bucket!\n");
 			goto unlock;
 		}
-		h = hash_record(tdb, off);
+
+		/* Find next empty hash slot. */
+		for (h = hash_record(tdb, off);
+		     tdb_read_off(tdb, newoff + (h & ((num * 2)-1))
+				  * sizeof(tdb_off_t)) != 0;
+		     h++);
+
 		/* FIXME: Encode extra hash bits! */
-		if (tdb_write_off(tdb, newoff
-				  + (h & ((num * 2) - 1)) * sizeof(uint64_t),
-				  off) == -1)
+		if (tdb_write_off(tdb, newoff + (h & ((num * 2)-1))
+				  * sizeof(tdb_off_t), off) == -1)
 			goto unlock;
+		i++;
 	}
 
 	/* Free up old hash. */
-	r = tdb_get(tdb, tdb->header.v.hash_off, &pad, sizeof(*r));
+	oldoff = tdb->header.v.hash_off - sizeof(*r);
+	r = tdb_get(tdb, oldoff, &pad, sizeof(*r));
 	if (!r)
 		goto unlock;
-	add_free_record(tdb, tdb->header.v.hash_off,
-			rec_data_length(r) + rec_extra_padding(r));
+	add_free_record(tdb, oldoff,
+			sizeof(*r)+rec_data_length(r)+rec_extra_padding(r));
 
 	/* Now we write the modified header. */
-	tdb->header.v.generation++;
 	tdb->header.v.hash_bits++;
 	tdb->header.v.hash_off = newoff;
-	tdb_write_convert(tdb, offsetof(struct tdb_header, v),
-			  &tdb->header.v, sizeof(tdb->header.v));
+	write_header(tdb);
 unlock:
 	tdb_allrecord_unlock(tdb, F_WRLCK);
 }
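
The rehash loop above re-inserts every old entry into the doubled
array by linear probing.  The placement rule in isolation, using a
hypothetical in-memory array in place of the tdb_read_off and
tdb_write_off calls:

	/* An entry with full hash h lands in the first zero slot at
	 * or after h & (2*num - 1); masking after each increment
	 * makes the scan wrap at the end of the array. */
	uint64_t mask = num * 2 - 1;
	uint64_t b = h & mask;
	while (newarray[b] != 0)
		b = (b + 1) & mask;
	newarray[b] = off;
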
@@ -573,15 +632,43 @@ unlock:
 int tdb_store(struct tdb_context *tdb,
 	      struct tdb_data key, struct tdb_data dbuf, int flag)
 {
-	tdb_off_t new_off, off, start, end, room;
+	tdb_off_t new_off, off, old_bucket, start, num_locks = 1;
+	struct tdb_used_record rec;
 	uint64_t h;
 	bool growing = false;
 
 	h = tdb_hash(tdb, key.dptr, key.dsize);
-	off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_WRLCK);
-	if (off == TDB_OFF_ERR)
+
+	/* FIXME: can we avoid locks for some fast paths? */
+	start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
+	if (start == TDB_OFF_ERR)
 		return -1;
 
+	/* Fast path. */
+	old_bucket = start;
+	off = entry_matches(tdb, start, h, &key, &rec);
+	if (unlikely(off == TDB_OFF_ERR)) {
+		/* Slow path, need to grab more locks and search. */
+		tdb_off_t i;
+
+		/* Warning: this may drop the lock!  Does that on error. */
+		num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
+		if (num_locks == TDB_OFF_ERR)
+			return -1;
+
+		for (i = start; i < start + num_locks; i++) {
+			off = entry_matches(tdb, i, h, &key, &rec);
+			/* Empty entry or we found it? */
+			if (off == 0 || off != TDB_OFF_ERR)
+				break;
+		}
+		if (i == start + num_locks)
+			off = 0;
+
+		/* Even if not found, this is where we put the new entry. */
+		old_bucket = i;
+	}
+
 	/* Now we have lock on this hash bucket. */
 	if (flag == TDB_INSERT) {
 		if (off) {
@@ -590,11 +677,12 @@ int tdb_store(struct tdb_context *tdb,
 		}
 	} else {
 		if (off) {
-			if (room >= key.dsize + dbuf.dsize) {
+			if (rec_data_length(&rec) + rec_extra_padding(&rec)
+			    >= dbuf.dsize) {
 				new_off = off;
 				if (update_rec_hdr(tdb, off,
 						   key.dsize, dbuf.dsize,
-						   room, h))
+						   &rec, h))
 					goto fail;
 				goto write;
 			}
@@ -615,7 +703,7 @@ int tdb_store(struct tdb_context *tdb,
 	/* Allocate a new record. */
 	new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
 	if (new_off == 0) {
-		unlock_lists(tdb, start, end, F_WRLCK);
+		unlock_lists(tdb, start, num_locks, F_WRLCK);
 		/* Expand, then try again... */
 		if (tdb_expand(tdb, key.dsize, dbuf.dsize, growing) == -1)
 			return -1;
@@ -625,15 +713,16 @@ int tdb_store(struct tdb_context *tdb,
 	/* We didn't like the existing one: remove it. */
 	if (off) {
 		add_free_record(tdb, off, sizeof(struct tdb_used_record)
-				+ key.dsize + room);
+				+ rec_key_length(&rec)
+				+ rec_data_length(&rec)
+				+ rec_extra_padding(&rec));
 	}
 
-write:
-	off = tdb->header.v.hash_off + end * sizeof(tdb_off_t);
 	/* FIXME: Encode extra hash bits! */
-	if (tdb_write_off(tdb, off, new_off) == -1)
+	if (tdb_write_off(tdb, hash_off(tdb, old_bucket), new_off) == -1)
 		goto fail;
 
+write:
 	off = new_off + sizeof(struct tdb_used_record);
 	if (tdb->methods->write(tdb, off, key.dptr, key.dsize) == -1)
 		goto fail;
@@ -642,48 +731,65 @@ write:
 		goto fail;
 
 	/* FIXME: tdb_increment_seqnum(tdb); */
-	unlock_lists(tdb, start, end, F_WRLCK);
+	unlock_lists(tdb, start, num_locks, F_WRLCK);
 
-	/* By simple trial and error, this roughly approximates a 60%
-	 * full measure. */
-	if (unlikely(end - start > 4 * tdb->header.v.hash_bits - 32))
+	/* FIXME: by simple simulation, this approximated 60% full.
+	 * Check in real case! */
+	if (unlikely(num_locks > 4 * tdb->header.v.hash_bits - 30))
 		enlarge_hash(tdb);
 
 	return 0;
 
 fail:
-	unlock_lists(tdb, start, end, F_WRLCK);
+	unlock_lists(tdb, start, num_locks, F_WRLCK);
 	return -1;
 }
 
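
As the branch on TDB_INSERT shows, tdb_store() is insert-only when
given TDB_INSERT and replace-or-insert otherwise (any other flag value
takes the else branch above).  A usage sketch:

	struct tdb_data key = { .dptr = (unsigned char *)"hello",
				.dsize = 5 };
	struct tdb_data val = { .dptr = (unsigned char *)"world",
				.dsize = 5 };

	if (tdb_store(tdb, key, val, TDB_INSERT) == -1)
		/* fails if the key already exists */;
	if (tdb_store(tdb, key, val, 0) == -1)
		/* replaces in place when the old record has room,
		 * else frees it and allocates a new one */;
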
 struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
 {
-	tdb_off_t off, start, end, room;
+	tdb_off_t off, start, num_locks = 1;
+	struct tdb_used_record rec;
 	uint64_t h;
-	struct tdb_used_record pad, *r;
 	struct tdb_data ret;
 
 	h = tdb_hash(tdb, key.dptr, key.dsize);
-	off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_RDLCK);
-	if (off == TDB_OFF_ERR)
-		return tdb_null;
 
-	if (!off) {
-		unlock_lists(tdb, start, end, F_RDLCK);
-		tdb->ecode = TDB_SUCCESS;
+	/* FIXME: can we avoid locks for some fast paths? */
+	start = tdb_lock_list(tdb, h, F_RDLCK, TDB_LOCK_WAIT);
+	if (start == TDB_OFF_ERR)
 		return tdb_null;
+
+	/* Fast path. */
+	off = entry_matches(tdb, start, h, &key, &rec);
+	if (unlikely(off == TDB_OFF_ERR)) {
+		/* Slow path, need to grab more locks and search. */
+		tdb_off_t i;
+
+		/* Warning: this may drop the lock!  Does that on error. */
+		num_locks = relock_hash_to_zero(tdb, start, F_RDLCK);
+		if (num_locks == TDB_OFF_ERR)
+			return tdb_null;
+
+		for (i = start; i < start + num_locks; i++) {
+			off = entry_matches(tdb, i, h, &key, &rec);
+			/* Empty entry or we found it? */
+			if (off == 0 || off != TDB_OFF_ERR)
+				break;
+		}
+		if (i == start + num_locks)
+			off = 0;
 	}
 
-	r = tdb_get(tdb, off, &pad, sizeof(*r));
-	if (!r) {
-		unlock_lists(tdb, start, end, F_RDLCK);
+	if (!off) {
+		unlock_lists(tdb, start, num_locks, F_RDLCK);
+		tdb->ecode = TDB_ERR_NOEXIST;
 		return tdb_null;
 	}
 
-	ret.dsize = rec_data_length(r);
-	ret.dptr = tdb_alloc_read(tdb, off + sizeof(*r) + key.dsize,
+	ret.dsize = rec_data_length(&rec);
+	ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
 				  ret.dsize);
-	unlock_lists(tdb, start, end, F_RDLCK);
+	unlock_lists(tdb, start, num_locks, F_RDLCK);
 	return ret;
 }
 
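
The fetch path copies the value out under the read locks via
tdb_alloc_read(), so the result stays valid after the unlock.  A
caller sketch; that the buffer is heap-allocated and owned by the
caller is an assumption here, based on the tdb_alloc_read name and
the tdb1 convention:

	struct tdb_data val = tdb_fetch(tdb, key);
	if (!val.dptr) {
		/* not found, or an error: see tdb_error(tdb) */
	} else {
		printf("%.*s\n", (int)val.dsize, val.dptr);
		free(val.dptr);
	}
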
@@ -691,154 +797,128 @@ static int hash_add(struct tdb_context *tdb, uint64_t h, tdb_off_t off)
 {
 	tdb_off_t i, hoff, len, num;
 
+	/* Look for next space. */
 	i = (h & ((1ULL << tdb->header.v.hash_bits) - 1));
-	hoff = tdb->header.v.hash_off + i * sizeof(tdb_off_t);
 	len = (1ULL << tdb->header.v.hash_bits) - i;
+	num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);
 
-	/* Look for next space. */
-	num = tdb_find_zero_off(tdb, hoff, len);
 	if (unlikely(num == len)) {
-		hoff = tdb->header.v.hash_off;
+		/* We wrapped.  Look through start of hash table. */
+		hoff = hash_off(tdb, 0);
 		len = (1ULL << tdb->header.v.hash_bits);
 		num = tdb_find_zero_off(tdb, hoff, len);
-		if (i == len)
+		if (i == len) {
+			tdb->ecode = TDB_ERR_CORRUPT;
+			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+				 "hash_add: full hash table!\n");
 			return -1;
+		}
 	}
 
 	/* FIXME: Encode extra hash bits! */
-	return tdb_write_off(tdb, hoff + num * sizeof(tdb_off_t), off);
+	return tdb_write_off(tdb, hash_off(tdb, i + num), off);
 }
 
-static int unlink_used_record(struct tdb_context *tdb, tdb_off_t chain,
-			      uint64_t *extra_locks)
+int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
 {
-	tdb_off_t num, len, i, hoff;
+	tdb_off_t i, old_bucket, off, start, num_locks = 1;
+	struct tdb_used_record rec;
+	uint64_t h;
 
-	/* FIXME: Maybe lock more in search?  Maybe don't lock if scan
-	 * finds none? */
-again:
-	len = (1ULL << tdb->header.v.hash_bits) - (chain + 1);
-	hoff = tdb->header.v.hash_off + (chain + 1) * sizeof(tdb_off_t);
-	num = tdb_find_zero_off(tdb, hoff, len);
-
-	/* We want to lock the zero entry, too.  In the wrap case,
-	 * this locks one extra.  That's harmless. */
-	num++;
-
-	for (i = chain + 1; i < chain + 1 + num; i++) {
-		if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_WAIT) == -1) {
-			if (i != chain + 1)
-				unlock_lists(tdb, chain + 1, i-1, F_WRLCK);
+	h = tdb_hash(tdb, key.dptr, key.dsize);
+
+	/* FIXME: can we avoid locks for some fast paths? */
+	start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
+	if (start == TDB_OFF_ERR)
+		return -1;
+
+	/* Fast path. */
+	old_bucket = start;
+	off = entry_matches(tdb, start, h, &key, &rec);
+	if (off && off != TDB_OFF_ERR) {
+		/* We can only really fastpath delete if next bucket
+		 * is 0.  Note that we haven't locked it, but our lock
+		 * on this bucket stops anyone overflowing into it
+		 * while we look. */
+		if (tdb_read_off(tdb, hash_off(tdb, h+1)) == 0)
+			goto delete;
+		/* Slow path. */
+		off = TDB_OFF_ERR;
+	}
+
+	if (unlikely(off == TDB_OFF_ERR)) {
+		/* Slow path, need to grab more locks and search. */
+		tdb_off_t i;
+
+		/* Warning: this may drop the lock!  Does that on error. */
+		num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
+		if (num_locks == TDB_OFF_ERR)
 			return -1;
-		}
-	}
 
-	/* The wrap case: we need those locks out of order! */
-	if (unlikely(num == len + 1)) {
-		*extra_locks = tdb_find_zero_off(tdb, tdb->header.v.hash_off,
-						 1ULL << tdb->header.v.hash_bits);
-		(*extra_locks)++;
-		for (i = 0; i < *extra_locks; i++) {
-			if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_NOWAIT)) {
-				/* Failed.  Caller must lock in order. */
-				if (i)
-					unlock_lists(tdb, 0, i-1, F_WRLCK);
-				unlock_lists(tdb, chain + 1, chain + num,
-					     F_WRLCK);
-				return 1;
+		for (i = start; i < start + num_locks; i++) {
+			off = entry_matches(tdb, i, h, &key, &rec);
+			/* Empty entry or we found it? */
+			if (off == 0 || off != TDB_OFF_ERR) {
+				old_bucket = i;
+				break;
 			}
 		}
-		num += *extra_locks;
+		if (i == start + num_locks)
+			off = 0;
 	}
 
-	/* Now we have the locks, be certain that offset is still 0! */
-	hoff = tdb->header.v.hash_off
-		+ (((chain + num) * sizeof(tdb_off_t))
-		   & ((1ULL << tdb->header.v.hash_bits) - 1));
-
-	if (unlikely(tdb_read_off(tdb, hoff) != 0)) {
-		unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);
-		goto again;
+	if (!off) {
+		unlock_lists(tdb, start, num_locks, F_WRLCK);
+		tdb->ecode = TDB_ERR_NOEXIST;
+		return -1;
 	}
 
-	/* OK, all locked.  Unlink first one. */
-	hoff = tdb->header.v.hash_off + chain * sizeof(tdb_off_t);
-	if (tdb_write_off(tdb, hoff, 0) == -1)
+delete:
+	/* This actually unlinks it. */
+	if (tdb_write_off(tdb, hash_off(tdb, old_bucket), 0) == -1)
 		goto unlock_err;
 
-	/* Rehash the rest. */
-	for (i = 1; i < num; i++) {
-		tdb_off_t off;
-		uint64_t h;
+	/* Rehash anything following. */
+	for (i = hash_off(tdb, old_bucket+1);
+	     i != hash_off(tdb, h + num_locks);
+	     i += sizeof(tdb_off_t)) {
+		tdb_off_t off2;
+		uint64_t h2;
 
-		hoff = tdb->header.v.hash_off
-			+ (((chain + i) * sizeof(tdb_off_t))
-			   & ((1ULL << tdb->header.v.hash_bits) - 1));
-		off = tdb_read_off(tdb, hoff);
-		if (unlikely(off == TDB_OFF_ERR))
+		off2 = tdb_read_off(tdb, i);
+		if (unlikely(off2 == TDB_OFF_ERR))
 			goto unlock_err;
 
 		/* Maybe use a bit to indicate it is in ideal place? */
-		h = hash_record(tdb, off);
+		h2 = hash_record(tdb, off2);
 		/* Is it happy where it is? */
-		if ((h & ((1ULL << tdb->header.v.hash_bits)-1)) == (chain + i))
+		if (hash_off(tdb, h2) == i)
 			continue;
 
 		/* Remove it. */
-		if (tdb_write_off(tdb, hoff, 0) == -1)
+		if (tdb_write_off(tdb, i, 0) == -1)
 			goto unlock_err;
 
 		/* Rehash it. */
-		if (hash_add(tdb, h, off) == -1)
+		if (hash_add(tdb, h2, off2) == -1)
 			goto unlock_err;
 	}
-	unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);
+
+	/* Free the deleted entry. */
+	if (add_free_record(tdb, off,
+			    sizeof(struct tdb_used_record)
+			    + rec_key_length(&rec)
+			    + rec_data_length(&rec)
+			    + rec_extra_padding(&rec)) != 0)
+		goto unlock_err;
+
+	unlock_lists(tdb, start, num_locks, F_WRLCK);
 	return 0;
 
 unlock_err:
-	unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);
+	unlock_lists(tdb, start, num_locks, F_WRLCK);
 	return -1;
 }
 
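
The "Rehash anything following" loop is what keeps linear probing
sound without tombstones.  A worked example with hash_bits = 3: if A,
B and C all hash to bucket 5 and sit in buckets 5, 6 and 7, deleting
A leaves a hole at 5, and a later lookup of B would start at 5, see
zero, and wrongly report "not found".  So each entry between the hole
and the next zero bucket is cleared and re-added via hash_add(),
letting B and C slide back toward their ideal bucket.
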
-int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
-{
-	tdb_off_t off, start, end, room, extra_locks = 0;
-	uint64_t h;
-	int ret;
-
-	h = tdb_hash(tdb, key.dptr, key.dsize);
-	off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_WRLCK);
-	if (off == TDB_OFF_ERR)
-		return -1;
-
-	if (off == 0) {
-		unlock_lists(tdb, start, end, F_WRLCK);
-		tdb->ecode = TDB_ERR_NOEXIST;
-		return -1;
-	}
-
-	ret = unlink_used_record(tdb, end, &extra_locks);
-	if (unlikely(ret == 1)) {
-		unsigned int i;
-
-		unlock_lists(tdb, start, end, F_WRLCK);
-
-		/* We need extra locks at the start. */
-		for (i = 0; i < extra_locks; i++) {
-			if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_WAIT)) {
-				if (i)
-					unlock_lists(tdb, 0, i-1, F_WRLCK);
-				return -1;
-			}
-		}
-		/* Try again now we're holding more locks. */
-		ret = tdb_delete(tdb, key);
-		unlock_lists(tdb, 0, i, F_WRLCK);
-		return ret;
-	}
-	unlock_lists(tdb, start, end, F_WRLCK);
-	return ret;
-}
-
 int tdb_close(struct tdb_context *tdb)
 {
 	struct tdb_context **i;
@@ -879,3 +959,8 @@ int tdb_close(struct tdb_context *tdb)
 
 	return ret;
 }
+
+enum TDB_ERROR tdb_error(struct tdb_context *tdb)
+{
+	return tdb->ecode;
+}
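
The new tdb_error() accessor completes the error convention used
throughout the patch: calls report failure in-band (-1, NULL or
tdb_null) and leave the specific cause in tdb->ecode.  A caller
sketch:

	if (tdb_delete(tdb, key) == -1) {
		if (tdb_error(tdb) == TDB_ERR_NOEXIST)
			/* key was not present; often benign */;
		else
			/* locking, I/O or corruption error */;
	}
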