X-Git-Url: http://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Ftdb2%2Ftdb.c;h=8aceeeb9446b64193f155fa376bba823f3483514;hp=61791c9d5b5ca8af9ef22a1f88e07ce8f8bbafc7;hb=ecec5bef6776fb9d44bdf479b7efbff0040b2b38;hpb=dbf1ac48c3a1f5147dfcd457a7847a03de26a6c7

diff --git a/ccan/tdb2/tdb.c b/ccan/tdb2/tdb.c
index 61791c9d..8aceeeb9 100644
--- a/ccan/tdb2/tdb.c
+++ b/ccan/tdb2/tdb.c
@@ -236,12 +236,24 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
 	tdb_io_init(tdb);
 	tdb_lock_init(tdb);
 
-	/* FIXME */
-	if (attr) {
-		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
-			 "tdb_open: attributes not yet supported\n");
-		errno = EINVAL;
-		goto fail;
+	while (attr) {
+		switch (attr->base.attr) {
+		case TDB_ATTRIBUTE_LOG:
+			tdb->log = attr->log.log_fn;
+			tdb->log_priv = attr->log.log_private;
+			break;
+		case TDB_ATTRIBUTE_HASH:
+			tdb->khash = attr->hash.hash_fn;
+			tdb->hash_priv = attr->hash.hash_private;
+			break;
+		default:
+			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
+				 "tdb_open: unknown attribute type %u\n",
+				 attr->base.attr);
+			errno = EINVAL;
+			goto fail;
+		}
+		attr = attr->base.next;
 	}
 
 	if ((open_flags & O_ACCMODE) == O_WRONLY) {
@@ -268,6 +280,8 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
 		}
 		TEST_IT(tdb->flags & TDB_CONVERT);
 		tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
+		/* Zones don't matter for internal db. */
+		tdb->last_zone = 0;
 		return tdb;
 	}
 
@@ -400,6 +414,8 @@ static tdb_off_t entry_matches(struct tdb_context *tdb,
 	uint64_t keylen;
 	const unsigned char *rkey;
 
+	list &= ((1ULL << tdb->header.v.hash_bits) - 1);
+
 	off = tdb_read_off(tdb, tdb->header.v.hash_off
 			   + list * sizeof(tdb_off_t));
 	if (off == 0 || off == TDB_OFF_ERR)
@@ -425,7 +441,7 @@ static tdb_off_t entry_matches(struct tdb_context *tdb,
 	if (keylen != key->dsize)
 		return TDB_OFF_ERR;
 
-	rkey = tdb_access_read(tdb, off + sizeof(*rec), keylen);
+	rkey = tdb_access_read(tdb, off + sizeof(*rec), keylen, false);
 	if (!rkey)
 		return TDB_OFF_ERR;
 	if (memcmp(rkey, key->dptr, keylen) != 0)
@@ -435,7 +451,7 @@ static tdb_off_t entry_matches(struct tdb_context *tdb,
 }
 
 /* FIXME: Optimize? */
-static void unlock_range(struct tdb_context *tdb,
+static void unlock_lists(struct tdb_context *tdb,
 			 tdb_off_t list, tdb_len_t num,
 			 int ltype)
 {
@@ -446,15 +462,16 @@ static void unlock_range(struct tdb_context *tdb,
 }
 
 /* FIXME: Optimize? */
-static int lock_range(struct tdb_context *tdb,
+static int lock_lists(struct tdb_context *tdb,
 		      tdb_off_t list, tdb_len_t num,
 		      int ltype)
 {
 	tdb_off_t i;
 
 	for (i = list; i < list + num; i++) {
-		if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT) != 0) {
-			unlock_range(tdb, list, i - list, ltype);
+		if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT)
+		    == TDB_OFF_ERR) {
+			unlock_lists(tdb, list, i - list, ltype);
 			return -1;
 		}
 	}
@@ -467,7 +484,7 @@ static int lock_range(struct tdb_context *tdb,
 static tdb_len_t relock_hash_to_zero(struct tdb_context *tdb,
 				     tdb_off_t start, int ltype)
 {
-	tdb_len_t num, len, pre_locks;
+	tdb_len_t num, len;
 
 again:
 	num = 1ULL << tdb->header.v.hash_bits;
@@ -475,39 +492,45 @@ again:
 	if (unlikely(len == num - start)) {
 		/* We hit the end of the hash range.  Drop lock: we have
 		   to lock start of hash first. */
+		tdb_len_t pre_locks;
+
 		tdb_unlock_list(tdb, start, ltype);
+
 		/* Grab something, so header is stable. */
 		if (tdb_lock_list(tdb, 0, ltype, TDB_LOCK_WAIT))
 			return TDB_OFF_ERR;
-		len = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
-		if (lock_range(tdb, 1, len, ltype) == -1) {
+		pre_locks = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
+		/* We want to lock the zero entry as well. */
+		pre_locks++;
+		if (lock_lists(tdb, 1, pre_locks - 1, ltype) == -1) {
 			tdb_unlock_list(tdb, 0, ltype);
 			return TDB_OFF_ERR;
 		}
-		pre_locks = len;
-		len = num - start;
+
+		/* Now lock later ones. */
+		if (unlikely(lock_lists(tdb, start, len, ltype) == -1)) {
+			unlock_lists(tdb, 0, pre_locks, ltype);
+			return TDB_OFF_ERR;
+		}
+		len += pre_locks;
 	} else {
-		/* We already have lock on start. */
-		start++;
-		pre_locks = 0;
-	}
-	if (unlikely(lock_range(tdb, start, len, ltype) == -1)) {
-		if (pre_locks)
-			unlock_range(tdb, 0, pre_locks, ltype);
-		else
+		/* We want to lock the zero entry as well. */
+		len++;
+		/* But we already have lock on start. */
+		if (unlikely(lock_lists(tdb, start+1, len-1, ltype) == -1)) {
 			tdb_unlock_list(tdb, start, ltype);
-		return TDB_OFF_ERR;
+			return TDB_OFF_ERR;
+		}
 	}
 
 	/* Now, did we lose the race, and it's not zero any more? */
-	if (unlikely(tdb_read_off(tdb, hash_off(tdb, pre_locks + len)) != 0)) {
-		unlock_range(tdb, 0, pre_locks, ltype);
+	if (unlikely(tdb_read_off(tdb, hash_off(tdb, start + len - 1)) != 0)) {
		/* Leave the start locked, as expected. */
-		unlock_range(tdb, start + 1, len - 1, ltype);
+		unlock_lists(tdb, start + 1, len - 1, ltype);
 		goto again;
 	}
 
-	return pre_locks + len;
+	return len;
 }
 
 /* FIXME: modify, don't rewrite! */
@@ -518,9 +541,9 @@ static int update_rec_hdr(struct tdb_context *tdb,
 			  struct tdb_used_record *rec,
 			  uint64_t h)
 {
-	uint64_t room = rec_data_length(rec) + rec_extra_padding(rec);
+	uint64_t dataroom = rec_data_length(rec) + rec_extra_padding(rec);
 
-	if (set_header(tdb, rec, keylen, datalen, room - datalen, h))
+	if (set_header(tdb, rec, keylen, datalen, keylen + dataroom, h))
 		return -1;
 
 	return tdb_write_convert(tdb, off, rec, sizeof(*rec));
@@ -529,7 +552,8 @@ static int update_rec_hdr(struct tdb_context *tdb,
 
 /* If we fail, others will try after us. */
 static void enlarge_hash(struct tdb_context *tdb)
 {
-	tdb_off_t newoff, i;
+	tdb_off_t newoff, oldoff, i;
+	tdb_len_t hlen;
 	uint64_t h, num = 1ULL << tdb->header.v.hash_bits;
 	struct tdb_used_record pad, *r;
@@ -541,98 +565,137 @@ static void enlarge_hash(struct tdb_context *tdb)
 	if ((1ULL << tdb->header.v.hash_bits) != num)
 		goto unlock;
 
-	newoff = alloc(tdb, 0, num * 2, 0, false);
+	/* Allocate our new array. */
+	hlen = num * sizeof(tdb_off_t) * 2;
+	newoff = alloc(tdb, 0, hlen, 0, false);
 	if (unlikely(newoff == TDB_OFF_ERR))
 		goto unlock;
 	if (unlikely(newoff == 0)) {
-		if (tdb_expand(tdb, 0, num * 2, false) == -1)
+		if (tdb_expand(tdb, 0, hlen, false) == -1)
 			goto unlock;
-		newoff = alloc(tdb, 0, num * 2, 0, false);
+		newoff = alloc(tdb, 0, hlen, 0, false);
 		if (newoff == TDB_OFF_ERR || newoff == 0)
 			goto unlock;
 	}
+	/* Step over record header! */
+	newoff += sizeof(struct tdb_used_record);
+
+	/* Starts all zero. */
+	if (zero_out(tdb, newoff, hlen) == -1)
+		goto unlock;
 
 	/* FIXME: If the space before is empty, we know this is in its ideal
-	 * location. We can steal a bit from the pointer to avoid rehash. */
-	for (i = tdb_find_nonzero_off(tdb, tdb->header.v.hash_off, num);
+	 * location. Or steal a bit from the pointer to avoid rehash. */
+	for (i = tdb_find_nonzero_off(tdb, hash_off(tdb, 0), num);
 	     i < num;
-	     i += tdb_find_nonzero_off(tdb, tdb->header.v.hash_off
-				       + i*sizeof(tdb_off_t), num - i)) {
+	     i += tdb_find_nonzero_off(tdb, hash_off(tdb, i), num - i)) {
 		tdb_off_t off;
-		off = tdb_read_off(tdb, tdb->header.v.hash_off
-				   + i*sizeof(tdb_off_t));
+		off = tdb_read_off(tdb, hash_off(tdb, i));
 		if (unlikely(off == TDB_OFF_ERR))
 			goto unlock;
 		if (unlikely(!off)) {
 			tdb->ecode = TDB_ERR_CORRUPT;
 			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
-				 "find_bucket_and_lock: zero hash bucket!\n");
+				 "enlarge_hash: zero hash bucket!\n");
 			goto unlock;
 		}
-		h = hash_record(tdb, off);
+
+		/* Find next empty hash slot. */
+		for (h = hash_record(tdb, off);
+		     tdb_read_off(tdb, newoff + (h & ((num * 2)-1))
+				  * sizeof(tdb_off_t)) != 0;
+		     h++);
+
 		/* FIXME: Encode extra hash bits! */
-		if (tdb_write_off(tdb, newoff
-				  + (h & ((num * 2) - 1)) * sizeof(uint64_t),
-				  off) == -1)
+		if (tdb_write_off(tdb, newoff + (h & ((num * 2)-1))
+				  * sizeof(tdb_off_t), off) == -1)
 			goto unlock;
+		i++;
 	}
 
 	/* Free up old hash. */
-	r = tdb_get(tdb, tdb->header.v.hash_off, &pad, sizeof(*r));
+	oldoff = tdb->header.v.hash_off - sizeof(*r);
+	r = tdb_get(tdb, oldoff, &pad, sizeof(*r));
 	if (!r)
 		goto unlock;
-	add_free_record(tdb, tdb->header.v.hash_off,
-			rec_data_length(r) + rec_extra_padding(r));
+	add_free_record(tdb, oldoff,
+			sizeof(*r)+rec_data_length(r)+rec_extra_padding(r));
 
 	/* Now we write the modified header. */
-	tdb->header.v.generation++;
 	tdb->header.v.hash_bits++;
 	tdb->header.v.hash_off = newoff;
-	tdb_write_convert(tdb, offsetof(struct tdb_header, v),
-			  &tdb->header.v, sizeof(tdb->header.v));
+	write_header(tdb);
 
 unlock:
 	tdb_allrecord_unlock(tdb, F_WRLCK);
 }
 
+/* This is the core routine which searches the hashtable for an entry.
+ * On error, no locks are held and TDB_OFF_ERR is returned.
+ * Otherwise, *num_locks locks of type ltype from *start_lock are held.
+ * The bucket where the entry is (or would be) is in *bucket.
+ * If not found, the return value is 0.
+ * If found, the return value is the offset, and *rec is the record. */
+static tdb_off_t find_and_lock(struct tdb_context *tdb,
+			       struct tdb_data key,
+			       uint64_t h,
+			       int ltype,
+			       tdb_off_t *start_lock,
+			       tdb_len_t *num_locks,
+			       tdb_off_t *bucket,
+			       struct tdb_used_record *rec)
+{
+	tdb_off_t off;
+
+	/* FIXME: can we avoid locks for some fast paths? */
+	*start_lock = tdb_lock_list(tdb, h, ltype, TDB_LOCK_WAIT);
+	if (*start_lock == TDB_OFF_ERR)
+		return TDB_OFF_ERR;
+
+	/* Fast path. */
+	off = entry_matches(tdb, *start_lock, h, &key, rec);
+	if (likely(off != TDB_OFF_ERR)) {
+		*bucket = *start_lock;
+		*num_locks = 1;
+		return off;
+	}
+
+	/* Slow path, need to grab more locks and search. */
+
+	/* Warning: this may drop the lock on *bucket! */
+	*num_locks = relock_hash_to_zero(tdb, *start_lock, ltype);
+	if (*num_locks == TDB_OFF_ERR)
+		return TDB_OFF_ERR;
+
+	for (*bucket = *start_lock;
+	     *bucket < *start_lock + *num_locks;
+	     (*bucket)++) {
+		off = entry_matches(tdb, *bucket, h, &key, rec);
+		/* Empty entry or we found it? */
+		if (off == 0 || off != TDB_OFF_ERR)
+			return off;
+	}
+
+	/* We didn't find a zero entry? Something went badly wrong... */
+	unlock_lists(tdb, *start_lock, *start_lock + *num_locks, ltype);
+	tdb->ecode = TDB_ERR_CORRUPT;
+	tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+		 "find_and_lock: expected to find an empty hash bucket!\n");
+	return TDB_OFF_ERR;
+}
+
 int tdb_store(struct tdb_context *tdb,
 	      struct tdb_data key, struct tdb_data dbuf, int flag)
 {
-	tdb_off_t new_off, off, old_bucket, start, num_locks = 1;
+	tdb_off_t new_off, off, bucket, start, num;
 	struct tdb_used_record rec;
 	uint64_t h;
 	bool growing = false;
 
 	h = tdb_hash(tdb, key.dptr, key.dsize);
-
-	/* FIXME: can we avoid locks for some fast paths? */
-	start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
-	if (start == TDB_OFF_ERR)
+	off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
+	if (unlikely(off == TDB_OFF_ERR))
 		return -1;
 
-	/* Fast path. */
-	old_bucket = start;
-	off = entry_matches(tdb, start, h, &key, &rec);
-	if (unlikely(off == TDB_OFF_ERR)) {
-		/* Slow path, need to grab more locks and search. */
-		tdb_off_t i;
-
-		/* Warning: this may drop the lock! Does that on error. */
-		num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
-		if (num_locks == TDB_OFF_ERR)
-			return -1;
-
-		for (i = start; i < start + num_locks; i++) {
-			off = entry_matches(tdb, i, h, &key, &rec);
-			/* Empty entry or we found it? */
-			if (off == 0 || off != TDB_OFF_ERR) {
-				old_bucket = i;
-				break;
-			}
-		}
-		if (i == start + num_locks)
-			off = 0;
-	}
-
 	/* Now we have lock on this hash bucket. */
 	if (flag == TDB_INSERT) {
 		if (off) {
@@ -667,7 +730,7 @@ int tdb_store(struct tdb_context *tdb,
 	/* Allocate a new record. */
 	new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
 	if (new_off == 0) {
-		unlock_range(tdb, start, num_locks, F_WRLCK);
+		unlock_lists(tdb, start, num, F_WRLCK);
 		/* Expand, then try again... */
 		if (tdb_expand(tdb, key.dsize, dbuf.dsize, growing) == -1)
 			return -1;
@@ -682,11 +745,11 @@ int tdb_store(struct tdb_context *tdb,
 					    + rec_extra_padding(&rec));
 	}
 
-write:
 	/* FIXME: Encode extra hash bits! */
-	if (tdb_write_off(tdb, hash_off(tdb, old_bucket), new_off) == -1)
+	if (tdb_write_off(tdb, hash_off(tdb, bucket), new_off) == -1)
 		goto fail;
 
+write:
 	off = new_off + sizeof(struct tdb_used_record);
 	if (tdb->methods->write(tdb, off, key.dptr, key.dsize) == -1)
 		goto fail;
@@ -695,65 +758,42 @@ write:
 		goto fail;
 
 	/* FIXME: tdb_increment_seqnum(tdb); */
-	unlock_range(tdb, start, num_locks, F_WRLCK);
+	unlock_lists(tdb, start, num, F_WRLCK);
 
 	/* FIXME: by simple simulation, this approximated 60% full.
 	 * Check in real case! */
-	if (unlikely(num_locks > 4 * tdb->header.v.hash_bits - 31))
+	if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
 		enlarge_hash(tdb);
 
 	return 0;
 
 fail:
-	unlock_range(tdb, start, num_locks, F_WRLCK);
+	unlock_lists(tdb, start, num, F_WRLCK);
 	return -1;
 }
 
 struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
 {
-	tdb_off_t off, start, num_locks = 1;
+	tdb_off_t off, start, num, bucket;
 	struct tdb_used_record rec;
 	uint64_t h;
 	struct tdb_data ret;
 
 	h = tdb_hash(tdb, key.dptr, key.dsize);
-
-	/* FIXME: can we avoid locks for some fast paths? */
-	start = tdb_lock_list(tdb, h, F_RDLCK, TDB_LOCK_WAIT);
-	if (start == TDB_OFF_ERR)
+	off = find_and_lock(tdb, key, h, F_RDLCK, &start, &num, &bucket, &rec);
+	if (unlikely(off == TDB_OFF_ERR))
 		return tdb_null;
 
-	/* Fast path. */
-	off = entry_matches(tdb, start, h, &key, &rec);
-	if (unlikely(off == TDB_OFF_ERR)) {
-		/* Slow path, need to grab more locks and search. */
-		tdb_off_t i;
-
-		/* Warning: this may drop the lock! Does that on error. */
-		num_locks = relock_hash_to_zero(tdb, start, F_RDLCK);
-		if (num_locks == TDB_OFF_ERR)
-			return tdb_null;
-
-		for (i = start; i < start + num_locks; i++) {
-			off = entry_matches(tdb, i, h, &key, &rec);
-			/* Empty entry or we found it? */
-			if (off == 0 || off != TDB_OFF_ERR)
-				break;
-		}
-		if (i == start + num_locks)
-			off = 0;
-	}
-
 	if (!off) {
-		unlock_range(tdb, start, num_locks, F_RDLCK);
 		tdb->ecode = TDB_ERR_NOEXIST;
-		return tdb_null;
+		ret = tdb_null;
+	} else {
+		ret.dsize = rec_data_length(&rec);
+		ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
+					  ret.dsize);
 	}
-	ret.dsize = rec_data_length(&rec);
-	ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
-				  ret.dsize);
-	unlock_range(tdb, start, num_locks, F_RDLCK);
+	unlock_lists(tdb, start, num, F_RDLCK);
 	return ret;
 }
 
@@ -784,66 +824,28 @@ static int hash_add(struct tdb_context *tdb, uint64_t h, tdb_off_t off)
 
 int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
 {
-	tdb_off_t i, old_bucket, off, start, num_locks = 1;
+	tdb_off_t i, bucket, off, start, num;
 	struct tdb_used_record rec;
 	uint64_t h;
 
 	h = tdb_hash(tdb, key.dptr, key.dsize);
-
-	/* FIXME: can we avoid locks for some fast paths? */
-	start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
-	if (start == TDB_OFF_ERR)
+	off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
+	if (unlikely(off == TDB_OFF_ERR))
 		return -1;
 
-	/* Fast path. */
-	old_bucket = start;
-	off = entry_matches(tdb, start, h, &key, &rec);
-	if (off && off != TDB_OFF_ERR) {
-		/* We can only really fastpath delete if next bucket
-		 * is 0.  Note that we haven't locked it, but our lock
-		 * on this bucket stops anyone overflowing into it
-		 * while we look. */
-		if (tdb_read_off(tdb, hash_off(tdb, h+1)) == 0)
-			goto delete;
-		/* Slow path. */
-		off = TDB_OFF_ERR;
-	}
-
-	if (unlikely(off == TDB_OFF_ERR)) {
-		/* Slow path, need to grab more locks and search. */
-		tdb_off_t i;
-
-		/* Warning: this may drop the lock! Does that on error. */
-		num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
-		if (num_locks == TDB_OFF_ERR)
-			return -1;
-
-		for (i = start; i < start + num_locks; i++) {
-			off = entry_matches(tdb, i, h, &key, &rec);
-			/* Empty entry or we found it? */
-			if (off == 0 || off != TDB_OFF_ERR) {
-				old_bucket = i;
-				break;
-			}
-		}
-		if (i == start + num_locks)
-			off = 0;
-	}
-
 	if (!off) {
-		unlock_range(tdb, start, num_locks, F_WRLCK);
+		unlock_lists(tdb, start, num, F_WRLCK);
 		tdb->ecode = TDB_ERR_NOEXIST;
 		return -1;
 	}
 
-delete:
 	/* This actually unlinks it. */
-	if (tdb_write_off(tdb, hash_off(tdb, old_bucket), 0) == -1)
+	if (tdb_write_off(tdb, hash_off(tdb, bucket), 0) == -1)
 		goto unlock_err;
 
 	/* Rehash anything following. */
-	for (i = hash_off(tdb, old_bucket+1);
-	     i != hash_off(tdb, h + num_locks);
+	for (i = hash_off(tdb, bucket+1);
+	     i != hash_off(tdb, h + num - 1);
 	     i += sizeof(tdb_off_t)) {
 		tdb_off_t off2;
 		uint64_t h2;
@@ -852,6 +854,10 @@ delete:
 		if (unlikely(off2 == TDB_OFF_ERR))
 			goto unlock_err;
 
+		/* This can happen if we raced. */
+		if (unlikely(off2 == 0))
+			break;
+
 		/* Maybe use a bit to indicate it is in ideal place? */
 		h2 = hash_record(tdb, off2);
 		/* Is it happy where it is? */
@@ -875,11 +881,11 @@ delete:
 			    + rec_extra_padding(&rec)) != 0)
 		goto unlock_err;
 
-	unlock_range(tdb, start, num_locks, F_WRLCK);
+	unlock_lists(tdb, start, num, F_WRLCK);
 	return 0;
 
 unlock_err:
-	unlock_range(tdb, start, num_locks, F_WRLCK);
+	unlock_lists(tdb, start, num, F_WRLCK);
 	return -1;
 }
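The rewritten enlarge_hash() above doubles the hash bucket array, zeroes it, and re-inserts every old entry by probing forward from its ideal bucket until it finds an empty slot, since tdb2 resolves collisions by letting entries overflow into the following buckets (which is also why tdb_delete() has to rehash the entries after the one it unlinks). Below is a minimal, self-contained sketch of just that rehash step, not tdb2 code: double_buckets(), record_hash() and the calloc'd in-memory array are invented stand-ins for the real tdb_read_off()/tdb_write_off() accesses on the mapped file at newoff.

#include <stdint.h>
#include <stdlib.h>

/* A bucket holds a record offset; 0 means "empty", as in tdb2's hash array. */
typedef uint64_t bucket_t;

/* Stand-in for hash_record(): any function mapping an offset to its hash. */
static uint64_t record_hash(bucket_t off)
{
	return off * 0x9E3779B97F4A7C15ULL;
}

/* Double a power-of-two bucket array, re-inserting each entry by linear
 * probing from its ideal slot, the way the new enlarge_hash() loop does. */
static bucket_t *double_buckets(const bucket_t *old, uint64_t num)
{
	uint64_t mask = (num * 2) - 1;	/* works because num is a power of 2 */
	bucket_t *fresh = calloc(num * 2, sizeof(*fresh));	/* starts all zero */
	uint64_t i, h;

	if (!fresh)
		return NULL;

	for (i = 0; i < num; i++) {
		if (old[i] == 0)
			continue;	/* empty bucket: nothing to move */
		/* Find next empty hash slot, starting at the ideal bucket. */
		for (h = record_hash(old[i]); fresh[h & mask] != 0; h++)
			;
		fresh[h & mask] = old[i];
	}
	return fresh;
}

Since at most num entries go into 2 * num buckets, the probe loop always terminates; the real code still carries a FIXME about encoding the spare hash bits in each bucket entry to avoid re-reading records on later lookups.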