X-Git-Url: https://git.ozlabs.org/?a=blobdiff_plain;f=ccan%2Ftdb2%2Ftdb.c;h=40811c2eff83609ca2c55d757815e2a0cae9f9d1;hb=4f73f6a6dfc0d65aa9a5055683bf7baa5a7b622e;hp=5d69eedcdc4ecf58079609628d59b902c4623b56;hpb=06e0037d97f5e1d83667ec40627cef862f3b7b85;p=ccan diff --git a/ccan/tdb2/tdb.c b/ccan/tdb2/tdb.c index 5d69eedc..40811c2e 100644 --- a/ccan/tdb2/tdb.c +++ b/ccan/tdb2/tdb.c @@ -21,9 +21,9 @@ null_log_fn(struct tdb_context *tdb, /* We do a lot of work assuming our copy of the header volatile area * is uptodate, and usually it is. However, once we grab a lock, we have to * re-check it. */ -bool update_header(struct tdb_context *tdb) +bool header_changed(struct tdb_context *tdb) { - struct tdb_header_volatile pad, *v; + uint64_t gen; if (!(tdb->flags & TDB_NOLOCK) && tdb->header_uptodate) { tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv, @@ -33,17 +33,23 @@ bool update_header(struct tdb_context *tdb) /* We could get a partial update if we're not holding any locks. */ assert((tdb->flags & TDB_NOLOCK) || tdb_has_locks(tdb)); - v = tdb_get(tdb, offsetof(struct tdb_header, v), &pad, sizeof(*v)); - if (!v) { - /* On failure, imply we updated header so they retry. */ - return true; - } tdb->header_uptodate = true; - if (likely(memcmp(&tdb->header.v, v, sizeof(*v)) == 0)) { - return false; + gen = tdb_read_off(tdb, offsetof(struct tdb_header, v.generation)); + if (unlikely(gen != tdb->header.v.generation)) { + tdb_read_convert(tdb, offsetof(struct tdb_header, v), + &tdb->header.v, sizeof(tdb->header.v)); + return true; } - tdb->header.v = *v; - return true; + return false; +} + +int write_header(struct tdb_context *tdb) +{ + assert(tdb_read_off(tdb, offsetof(struct tdb_header, v.generation)) + == tdb->header.v.generation); + tdb->header.v.generation++; + return tdb_write_convert(tdb, offsetof(struct tdb_header, v), + &tdb->header.v, sizeof(tdb->header.v)); } static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed, @@ -118,12 +124,22 @@ static uint64_t random_number(struct tdb_context *tdb) return ret; } -struct new_database { +struct new_db_head { struct tdb_header hdr; + struct free_zone_header zhdr; + tdb_off_t free[BUCKETS_FOR_ZONE(INITIAL_ZONE_BITS) + 1]; struct tdb_used_record hrec; tdb_off_t hash[1ULL << INITIAL_HASH_BITS]; - struct tdb_used_record frec; - tdb_off_t free[INITIAL_FREE_BUCKETS + 1]; /* One overflow bucket */ + struct tdb_free_record frec; +}; + +struct new_database { + struct new_db_head h; + /* Rest up to 1 << INITIAL_ZONE_BITS is empty. */ + char space[(1 << INITIAL_ZONE_BITS) + - (sizeof(struct new_db_head) - sizeof(struct tdb_header))]; + uint8_t tailer; + /* Don't count final padding! */ }; /* initialise a new database */ @@ -131,51 +147,64 @@ static int tdb_new_database(struct tdb_context *tdb) { /* We make it up in memory, then write it out if not internal */ struct new_database newdb; - unsigned int magic_off = offsetof(struct tdb_header, magic_food); + unsigned int bucket, magic_off, dbsize; - /* Fill in the header */ - newdb.hdr.version = TDB_VERSION; - newdb.hdr.hash_seed = random_number(tdb); - newdb.hdr.hash_test = TDB_HASH_MAGIC; - newdb.hdr.hash_test = tdb->khash(&newdb.hdr.hash_test, - sizeof(newdb.hdr.hash_test), - newdb.hdr.hash_seed, - tdb->hash_priv); - - newdb.hdr.v.generation = 0; - - /* The initial zone must cover the initial database size! */ - BUILD_ASSERT((1ULL << INITIAL_ZONE_BITS) >= sizeof(newdb)); - - /* Free array has 1 zone, 10 buckets. All buckets empty. 
*/ - newdb.hdr.v.num_zones = 1; - newdb.hdr.v.zone_bits = INITIAL_ZONE_BITS; - newdb.hdr.v.free_buckets = INITIAL_FREE_BUCKETS; - newdb.hdr.v.free_off = offsetof(struct new_database, free); - set_header(tdb, &newdb.frec, 0, - sizeof(newdb.free), sizeof(newdb.free), 0); - memset(newdb.free, 0, sizeof(newdb.free)); + /* Don't want any extra padding! */ + dbsize = offsetof(struct new_database, tailer) + sizeof(newdb.tailer); + /* Fill in the header */ + newdb.h.hdr.version = TDB_VERSION; + newdb.h.hdr.hash_seed = random_number(tdb); + newdb.h.hdr.hash_test = TDB_HASH_MAGIC; + newdb.h.hdr.hash_test = tdb->khash(&newdb.h.hdr.hash_test, + sizeof(newdb.h.hdr.hash_test), + newdb.h.hdr.hash_seed, + tdb->hash_priv); + memset(newdb.h.hdr.reserved, 0, sizeof(newdb.h.hdr.reserved)); + newdb.h.hdr.v.generation = 0; /* Initial hashes are empty. */ - newdb.hdr.v.hash_bits = INITIAL_HASH_BITS; - newdb.hdr.v.hash_off = offsetof(struct new_database, hash); - set_header(tdb, &newdb.hrec, 0, - sizeof(newdb.hash), sizeof(newdb.hash), 0); - memset(newdb.hash, 0, sizeof(newdb.hash)); + newdb.h.hdr.v.hash_bits = INITIAL_HASH_BITS; + newdb.h.hdr.v.hash_off = offsetof(struct new_database, h.hash); + set_header(tdb, &newdb.h.hrec, 0, + sizeof(newdb.h.hash), sizeof(newdb.h.hash), 0, + INITIAL_ZONE_BITS); + memset(newdb.h.hash, 0, sizeof(newdb.h.hash)); + + /* Create the single free entry. */ + newdb.h.frec.magic_and_meta = TDB_FREE_MAGIC | INITIAL_ZONE_BITS; + newdb.h.frec.data_len = (sizeof(newdb.h.frec) + - sizeof(struct tdb_used_record) + + sizeof(newdb.space)); + + /* Free is mostly empty... */ + newdb.h.zhdr.zone_bits = INITIAL_ZONE_BITS; + memset(newdb.h.free, 0, sizeof(newdb.h.free)); + + /* ... except for this one bucket. */ + bucket = size_to_bucket(INITIAL_ZONE_BITS, newdb.h.frec.data_len); + newdb.h.free[bucket] = offsetof(struct new_database, h.frec); + newdb.h.frec.next = newdb.h.frec.prev = 0; + + /* Clear free space to keep valgrind happy, and avoid leaking stack. */ + memset(newdb.space, 0, sizeof(newdb.space)); + + /* Tailer contains maximum number of free_zone bits. */ + newdb.tailer = INITIAL_ZONE_BITS; /* Magic food */ - memset(newdb.hdr.magic_food, 0, sizeof(newdb.hdr.magic_food)); - strcpy(newdb.hdr.magic_food, TDB_MAGIC_FOOD); + memset(newdb.h.hdr.magic_food, 0, sizeof(newdb.h.hdr.magic_food)); + strcpy(newdb.h.hdr.magic_food, TDB_MAGIC_FOOD); /* This creates an endian-converted database, as if read from disk */ + magic_off = offsetof(struct tdb_header, magic_food); tdb_convert(tdb, - (char *)&newdb.hdr + magic_off, - sizeof(newdb) - magic_off); + (char *)&newdb.h.hdr + magic_off, + dbsize - 1 - magic_off); - tdb->header = newdb.hdr; + tdb->header = newdb.h.hdr; if (tdb->flags & TDB_INTERNAL) { - tdb->map_size = sizeof(newdb); + tdb->map_size = dbsize; tdb->map_ptr = malloc(tdb->map_size); if (!tdb->map_ptr) { tdb->ecode = TDB_ERR_OOM; @@ -190,7 +219,7 @@ static int tdb_new_database(struct tdb_context *tdb) if (ftruncate(tdb->fd, 0) == -1) return -1; - if (!tdb_pwrite_all(tdb->fd, &newdb, sizeof(newdb), 0)) { + if (!tdb_pwrite_all(tdb->fd, &newdb, dbsize, 0)) { tdb->ecode = TDB_ERR_IO; return -1; } @@ -216,7 +245,7 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags, tdb->name = NULL; tdb->map_ptr = NULL; tdb->fd = -1; - /* map_size will be set below. */ + tdb->map_size = sizeof(struct tdb_header); tdb->ecode = TDB_SUCCESS; /* header will be read in below. 
*/ tdb->header_uptodate = false; @@ -230,12 +259,24 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags, tdb_io_init(tdb); tdb_lock_init(tdb); - /* FIXME */ - if (attr) { - tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv, - "tdb_open: attributes not yet supported\n"); - errno = EINVAL; - goto fail; + while (attr) { + switch (attr->base.attr) { + case TDB_ATTRIBUTE_LOG: + tdb->log = attr->log.log_fn; + tdb->log_priv = attr->log.log_private; + break; + case TDB_ATTRIBUTE_HASH: + tdb->khash = attr->hash.hash_fn; + tdb->hash_priv = attr->hash.hash_private; + break; + default: + tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv, + "tdb_open: unknown attribute type %u\n", + attr->base.attr); + errno = EINVAL; + goto fail; + } + attr = attr->base.next; } if ((open_flags & O_ACCMODE) == O_WRONLY) { @@ -262,6 +303,7 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags, } TEST_IT(tdb->flags & TDB_CONVERT); tdb_convert(tdb, &tdb->header, sizeof(tdb->header)); + tdb_zone_init(tdb); return tdb; } @@ -337,12 +379,16 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags, goto fail; } - tdb->map_size = st.st_size; tdb->device = st.st_dev; tdb->inode = st.st_ino; - tdb_mmap(tdb); tdb_unlock_open(tdb); - tdb_zone_init(tdb); + + /* This make sure we have current map_size and mmap. */ + tdb->methods->oob(tdb, tdb->map_size + 1, true); + + /* Now we can pick a random free zone to start from. */ + if (tdb_zone_init(tdb) == -1) + goto fail; tdb->next = tdbs; tdbs = tdb; @@ -374,215 +420,387 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags, return NULL; } -static int tdb_key_compare(TDB_DATA key, TDB_DATA data, void *private_data) +tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list) { - return memcmp(data.dptr, key.dptr, data.dsize) == 0; + return tdb->header.v.hash_off + + ((list & ((1ULL << tdb->header.v.hash_bits) - 1)) + * sizeof(tdb_off_t)); } -static void unlock_lists(struct tdb_context *tdb, - uint64_t start, uint64_t end, int ltype) -{ - do { - tdb_unlock_list(tdb, start, ltype); - start = (start + 1) & ((1ULL << tdb->header.v.hash_bits) - 1); - } while (start != end); -} - -/* FIXME: Return header copy? */ -/* Returns -1 or offset of entry (0 if not found). - * Locks hash entried from *start to *end (where the entry was found). */ -static tdb_off_t find_bucket_and_lock(struct tdb_context *tdb, - const struct tdb_data *key, - uint64_t hash, - uint64_t *start, - uint64_t *end, - uint64_t *room, - int ltype) +/* Returns 0 if the entry is a zero (definitely not a match). + * Returns a valid entry offset if it's a match. Fills in rec. + * Otherwise returns TDB_OFF_ERR: keep searching. */ +static tdb_off_t entry_matches(struct tdb_context *tdb, + uint64_t list, + uint64_t hash, + const struct tdb_data *key, + struct tdb_used_record *rec) { - uint64_t hextra; tdb_off_t off; + uint64_t keylen; + const unsigned char *rkey; - /* hash_bits might be out of date... */ -again: - *start = *end = hash & ((1ULL << tdb->header.v.hash_bits) - 1); - hextra = hash >> tdb->header.v.hash_bits; + list &= ((1ULL << tdb->header.v.hash_bits) - 1); - /* FIXME: can we avoid locks for some fast paths? */ - if (tdb_lock_list(tdb, *end, ltype, TDB_LOCK_WAIT) == -1) + off = tdb_read_off(tdb, tdb->header.v.hash_off + + list * sizeof(tdb_off_t)); + if (off == 0 || off == TDB_OFF_ERR) + return off; + +#if 0 /* FIXME: Check other bits. */ + unsigned int bits, bitmask, hoffextra; + /* Bottom three bits show how many extra hash bits. 
*/ + bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1; + bitmask = (1 << bits)-1; + hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask); + uint64_t hextra = hash >> tdb->header.v.hash_bits; + if ((hextra & bitmask) != hoffextra) return TDB_OFF_ERR; + off &= ~...; +#endif - /* We only need to check this for first lock. */ - if (unlikely(update_header(tdb))) { - tdb_unlock_list(tdb, *end, ltype); - goto again; - } + if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1) + return TDB_OFF_ERR; - while ((off = tdb_read_off(tdb, tdb->header.v.hash_off - + *end * sizeof(tdb_off_t))) - != TDB_OFF_ERR) { - struct tdb_used_record pad, *r; - uint64_t keylen, next; + /* FIXME: check extra bits in header! */ + keylen = rec_key_length(rec); + if (keylen != key->dsize) + return TDB_OFF_ERR; - /* Didn't find it? */ - if (!off) - return 0; + rkey = tdb_access_read(tdb, off + sizeof(*rec), keylen, false); + if (!rkey) + return TDB_OFF_ERR; + if (memcmp(rkey, key->dptr, keylen) != 0) + off = TDB_OFF_ERR; + tdb_access_release(tdb, rkey); + return off; +} -#if 0 /* FIXME: Check other bits. */ - unsigned int bits, bitmask, hoffextra; - /* Bottom three bits show how many extra hash bits. */ - bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1; - bitmask = (1 << bits)-1; - hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask); - if ((hextra & bitmask) != hoffextra) - goto lock_next; -#endif +/* FIXME: Optimize? */ +static void unlock_lists(struct tdb_context *tdb, + tdb_off_t list, tdb_len_t num, + int ltype) +{ + tdb_off_t i; - r = tdb_get(tdb, off, &pad, sizeof(*r)); - if (!r) - goto unlock_err; + for (i = list; i < list + num; i++) + tdb_unlock_list(tdb, i, ltype); +} - if (rec_magic(r) != TDB_MAGIC) { - tdb->ecode = TDB_ERR_CORRUPT; - tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv, - "find_bucket_and_lock: bad magic 0x%llx" - " at offset %llu!\n", - (long long)rec_magic(r), (long long)off); - goto unlock_err; +/* FIXME: Optimize? */ +static int lock_lists(struct tdb_context *tdb, + tdb_off_t list, tdb_len_t num, + int ltype) +{ + tdb_off_t i; + + for (i = list; i < list + num; i++) { + if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT) + == TDB_OFF_ERR) { + unlock_lists(tdb, list, i - list, ltype); + return -1; } + } + return 0; +} - /* FIXME: check extra bits in header! */ - keylen = rec_key_length(r); - if (keylen != key->dsize) - goto lock_next; - - switch (tdb_parse_data(tdb, *key, off + sizeof(*r), key->dsize, - tdb_key_compare, NULL)) { - case 1: - /* Match! */ - *room = rec_data_length(r) + rec_extra_padding(r); - return off >> TDB_EXTRA_HASHBITS_NUM; - case 0: - break; - default: - goto unlock_err; +/* We lock hashes up to the next empty offset. We already hold the + * lock on the start bucket, but we may need to release and re-grab + * it. If we fail, we hold no locks at all! */ +static tdb_len_t relock_hash_to_zero(struct tdb_context *tdb, + tdb_off_t start, int ltype) +{ + tdb_len_t num, len; + +again: + num = 1ULL << tdb->header.v.hash_bits; + len = tdb_find_zero_off(tdb, hash_off(tdb, start), num - start); + if (unlikely(len == num - start)) { + /* We hit the end of the hash range. Drop lock: we have + to lock start of hash first. */ + tdb_len_t pre_locks; + + tdb_unlock_list(tdb, start, ltype); + + /* Grab something, so header is stable. */ + if (tdb_lock_list(tdb, 0, ltype, TDB_LOCK_WAIT)) + return TDB_OFF_ERR; + pre_locks = tdb_find_zero_off(tdb, hash_off(tdb, 0), num); + /* We want to lock the zero entry as well. 
*/ + pre_locks++; + if (lock_lists(tdb, 1, pre_locks - 1, ltype) == -1) { + tdb_unlock_list(tdb, 0, ltype); + return TDB_OFF_ERR; } - lock_next: - /* Lock next bucket. */ - /* FIXME: We can deadlock if this wraps! */ - next = (*end + 1) & ((1ULL << tdb->header.v.hash_bits) - 1); - if (next == *start) { - tdb->ecode = TDB_ERR_CORRUPT; - tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv, - "find_bucket_and_lock: full hash table!\n"); - goto unlock_err; + /* Now lock later ones. */ + if (unlikely(lock_lists(tdb, start, len, ltype) == -1)) { + unlock_lists(tdb, 0, pre_locks, ltype); + return TDB_OFF_ERR; + } + len += pre_locks; + } else { + /* We want to lock the zero entry as well. */ + len++; + /* But we already have lock on start. */ + if (unlikely(lock_lists(tdb, start+1, len-1, ltype) == -1)) { + tdb_unlock_list(tdb, start, ltype); + return TDB_OFF_ERR; } - if (tdb_lock_list(tdb, next, ltype, TDB_LOCK_WAIT) == -1) - goto unlock_err; - *end = next; } -unlock_err: - TEST_IT(*end < *start); - unlock_lists(tdb, *start, *end, ltype); - return TDB_OFF_ERR; + /* Now, did we lose the race, and it's not zero any more? */ + if (unlikely(tdb_read_off(tdb, hash_off(tdb, start + len - 1)) != 0)) { + /* Leave the start locked, as expected. */ + unlock_lists(tdb, start + 1, len - 1, ltype); + goto again; + } + + return len; } +/* FIXME: modify, don't rewrite! */ static int update_rec_hdr(struct tdb_context *tdb, tdb_off_t off, tdb_len_t keylen, tdb_len_t datalen, - tdb_len_t room, + struct tdb_used_record *rec, uint64_t h) { - struct tdb_used_record rec; + uint64_t dataroom = rec_data_length(rec) + rec_extra_padding(rec); + + if (set_header(tdb, rec, keylen, datalen, keylen + dataroom, h, + rec_zone_bits(rec))) + return -1; + + return tdb_write_convert(tdb, off, rec, sizeof(*rec)); +} + +static int hash_add(struct tdb_context *tdb, + uint64_t hash, tdb_off_t off) +{ + tdb_off_t i, hoff, len, num; - if (set_header(tdb, &rec, keylen, datalen, room - datalen, h)) + /* Look for next space. */ + i = (hash & ((1ULL << tdb->header.v.hash_bits) - 1)); + len = (1ULL << tdb->header.v.hash_bits) - i; + num = tdb_find_zero_off(tdb, hash_off(tdb, i), len); + + if (unlikely(num == len)) { + /* We wrapped. Look through start of hash table. */ + i = 0; + hoff = hash_off(tdb, 0); + len = (1ULL << tdb->header.v.hash_bits); + num = tdb_find_zero_off(tdb, hoff, len); + if (num == len) { + tdb->ecode = TDB_ERR_CORRUPT; + tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv, + "hash_add: full hash table!\n"); + return -1; + } + } + if (tdb_read_off(tdb, hash_off(tdb, i + num)) != 0) { + tdb->ecode = TDB_ERR_CORRUPT; + tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv, + "hash_add: overwriting hash table?\n"); return -1; + } - return tdb_write_convert(tdb, off, &rec, sizeof(rec)); + /* FIXME: Encode extra hash bits! */ + return tdb_write_off(tdb, hash_off(tdb, i + num), off); } /* If we fail, others will try after us. */ static void enlarge_hash(struct tdb_context *tdb) { - tdb_off_t newoff, i; - uint64_t h, num = 1ULL << tdb->header.v.hash_bits; + tdb_off_t newoff, oldoff, i; + tdb_len_t hlen; + uint64_t num = 1ULL << tdb->header.v.hash_bits; struct tdb_used_record pad, *r; + unsigned int records = 0; /* FIXME: We should do this without holding locks throughout. */ if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1) return; - if (unlikely(update_header(tdb))) { - /* Someone else enlarged for us? Nothing to do. */ - if ((1ULL << tdb->header.v.hash_bits) != num) - goto unlock; - } + /* Someone else enlarged for us? Nothing to do. 
*/ + if ((1ULL << tdb->header.v.hash_bits) != num) + goto unlock; - newoff = alloc(tdb, 0, num * 2, 0, false); + /* Allocate our new array. */ + hlen = num * sizeof(tdb_off_t) * 2; + newoff = alloc(tdb, 0, hlen, 0, false); if (unlikely(newoff == TDB_OFF_ERR)) goto unlock; - if (unlikely(newoff == 0)) { - if (tdb_expand(tdb, 0, num * 2, false) == -1) - goto unlock; - newoff = alloc(tdb, 0, num * 2, 0, false); - if (newoff == TDB_OFF_ERR || newoff == 0) - goto unlock; - } + /* Step over record header! */ + newoff += sizeof(struct tdb_used_record); + + /* Starts all zero. */ + if (zero_out(tdb, newoff, hlen) == -1) + goto unlock; + + /* Update header now so we can use normal routines. */ + oldoff = tdb->header.v.hash_off; + + tdb->header.v.hash_bits++; + tdb->header.v.hash_off = newoff; /* FIXME: If the space before is empty, we know this is in its ideal - * location. We can steal a bit from the pointer to avoid rehash. */ - for (i = tdb_find_nonzero_off(tdb, tdb->header.v.hash_off, num); - i < num; - i += tdb_find_nonzero_off(tdb, tdb->header.v.hash_off - + i*sizeof(tdb_off_t), num - i)) { + * location. Or steal a bit from the pointer to avoid rehash. */ + for (i = 0; i < num; i++) { tdb_off_t off; - off = tdb_read_off(tdb, tdb->header.v.hash_off - + i*sizeof(tdb_off_t)); + off = tdb_read_off(tdb, oldoff + i * sizeof(tdb_off_t)); if (unlikely(off == TDB_OFF_ERR)) - goto unlock; - if (unlikely(!off)) { - tdb->ecode = TDB_ERR_CORRUPT; - tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv, - "find_bucket_and_lock: zero hash bucket!\n"); - goto unlock; - } - h = hash_record(tdb, off); - /* FIXME: Encode extra hash bits! */ - if (tdb_write_off(tdb, newoff - + (h & ((num * 2) - 1)) * sizeof(uint64_t), - off) == -1) - goto unlock; + goto oldheader; + if (off && hash_add(tdb, hash_record(tdb, off), off) == -1) + goto oldheader; + if (off) + records++; } + tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv, + "enlarge_hash: moved %u records from %llu buckets.\n", + records, (long long)num); + /* Free up old hash. */ - r = tdb_get(tdb, tdb->header.v.hash_off, &pad, sizeof(*r)); + r = tdb_get(tdb, oldoff - sizeof(*r), &pad, sizeof(*r)); if (!r) - goto unlock; - add_free_record(tdb, tdb->header.v.hash_off, - rec_data_length(r) + rec_extra_padding(r)); + goto oldheader; + add_free_record(tdb, rec_zone_bits(r), oldoff - sizeof(*r), + sizeof(*r)+rec_data_length(r)+rec_extra_padding(r)); /* Now we write the modified header. */ - tdb->header.v.generation++; - tdb->header.v.hash_bits++; - tdb->header.v.hash_off = newoff; - tdb_write_convert(tdb, offsetof(struct tdb_header, v), - &tdb->header.v, sizeof(tdb->header.v)); + write_header(tdb); unlock: tdb_allrecord_unlock(tdb, F_WRLCK); + return; + +oldheader: + tdb->header.v.hash_bits--; + tdb->header.v.hash_off = oldoff; + goto unlock; +} + + +/* This is the slow version of the routine which searches the + * hashtable for an entry. + * We lock every hash bucket up to and including the next zero one. + */ +static tdb_off_t find_and_lock_slow(struct tdb_context *tdb, + struct tdb_data key, + uint64_t h, + int ltype, + tdb_off_t *start_lock, + tdb_len_t *num_locks, + tdb_off_t *bucket, + struct tdb_used_record *rec) +{ + /* Warning: this may drop the lock on *bucket! */ + *num_locks = relock_hash_to_zero(tdb, *start_lock, ltype); + if (*num_locks == TDB_OFF_ERR) + return TDB_OFF_ERR; + + for (*bucket = *start_lock; + *bucket < *start_lock + *num_locks; + (*bucket)++) { + tdb_off_t off = entry_matches(tdb, *bucket, h, &key, rec); + /* Empty entry or we found it? 
*/ + if (off == 0 || off != TDB_OFF_ERR) + return off; + } + + /* We didn't find a zero entry? Something went badly wrong... */ + unlock_lists(tdb, *start_lock, *start_lock + *num_locks, ltype); + tdb->ecode = TDB_ERR_CORRUPT; + tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv, + "find_and_lock: expected to find an empty hash bucket!\n"); + return TDB_OFF_ERR; +} + +/* This is the core routine which searches the hashtable for an entry. + * On error, no locks are held and TDB_OFF_ERR is returned. + * Otherwise, *num_locks locks of type ltype from *start_lock are held. + * The bucket where the entry is (or would be) is in *bucket. + * If not found, the return value is 0. + * If found, the return value is the offset, and *rec is the record. */ +static tdb_off_t find_and_lock(struct tdb_context *tdb, + struct tdb_data key, + uint64_t h, + int ltype, + tdb_off_t *start_lock, + tdb_len_t *num_locks, + tdb_off_t *bucket, + struct tdb_used_record *rec) +{ + tdb_off_t off; + + /* FIXME: can we avoid locks for some fast paths? */ + *start_lock = tdb_lock_list(tdb, h, ltype, TDB_LOCK_WAIT); + if (*start_lock == TDB_OFF_ERR) + return TDB_OFF_ERR; + + /* Fast path. */ + off = entry_matches(tdb, *start_lock, h, &key, rec); + if (likely(off != TDB_OFF_ERR)) { + *bucket = *start_lock; + *num_locks = 1; + return off; + } + + /* Slow path, need to grab more locks and search. */ + return find_and_lock_slow(tdb, key, h, ltype, start_lock, num_locks, + bucket, rec); +} + +/* Returns -1 on error, 0 on OK" */ +static int replace_data(struct tdb_context *tdb, + uint64_t h, struct tdb_data key, struct tdb_data dbuf, + tdb_off_t bucket, + tdb_off_t old_off, tdb_len_t old_room, + unsigned old_zone, + bool growing) +{ + tdb_off_t new_off; + + /* Allocate a new record. */ + new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing); + if (unlikely(new_off == TDB_OFF_ERR)) + return -1; + + /* We didn't like the existing one: remove it. */ + if (old_off) + add_free_record(tdb, old_zone, old_off, + sizeof(struct tdb_used_record) + + key.dsize + old_room); + + /* FIXME: Encode extra hash bits! */ + if (tdb_write_off(tdb, hash_off(tdb, bucket), new_off) == -1) + return -1; + + new_off += sizeof(struct tdb_used_record); + if (tdb->methods->write(tdb, new_off, key.dptr, key.dsize) == -1) + return -1; + + new_off += key.dsize; + if (tdb->methods->write(tdb, new_off, dbuf.dptr, dbuf.dsize) == -1) + return -1; + + /* FIXME: tdb_increment_seqnum(tdb); */ + return 0; } int tdb_store(struct tdb_context *tdb, struct tdb_data key, struct tdb_data dbuf, int flag) { - tdb_off_t new_off, off, start, end, room; + tdb_off_t off, bucket, start, num; + tdb_len_t old_room = 0; + struct tdb_used_record rec; uint64_t h; - bool growing = false; + int ret; h = tdb_hash(tdb, key.dptr, key.dsize); - off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_WRLCK); - if (off == TDB_OFF_ERR) + off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec); + if (unlikely(off == TDB_OFF_ERR)) return -1; /* Now we have lock on this hash bucket. */ @@ -593,17 +811,22 @@ int tdb_store(struct tdb_context *tdb, } } else { if (off) { - if (room >= key.dsize + dbuf.dsize) { - new_off = off; + old_room = rec_data_length(&rec) + + rec_extra_padding(&rec); + if (old_room >= dbuf.dsize) { + /* Can modify in-place. Easy! 
*/ if (update_rec_hdr(tdb, off, key.dsize, dbuf.dsize, - room, h)) + &rec, h)) + goto fail; + if (tdb->methods->write(tdb, off + sizeof(rec) + + key.dsize, + dbuf.dptr, dbuf.dsize)) goto fail; - goto write; + unlock_lists(tdb, start, num, F_WRLCK); + return 0; } /* FIXME: See if right record is free? */ - /* Hint to allocator that we've realloced. */ - growing = true; } else { if (flag == TDB_MODIFY) { /* if the record doesn't exist and we @@ -615,175 +838,175 @@ int tdb_store(struct tdb_context *tdb, } } - /* Allocate a new record. */ - new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing); - if (new_off == 0) { - unlock_lists(tdb, start, end, F_WRLCK); - /* Expand, then try again... */ - if (tdb_expand(tdb, key.dsize, dbuf.dsize, growing) == -1) - return -1; - return tdb_store(tdb, key, dbuf, flag); - } + /* If we didn't use the old record, this implies we're growing. */ + ret = replace_data(tdb, h, key, dbuf, bucket, off, old_room, + rec_zone_bits(&rec), off != 0); + unlock_lists(tdb, start, num, F_WRLCK); + + /* FIXME: by simple simulation, this approximated 60% full. + * Check in real case! */ + if (unlikely(num > 4 * tdb->header.v.hash_bits - 30)) + enlarge_hash(tdb); + + return ret; + +fail: + unlock_lists(tdb, start, num, F_WRLCK); + return -1; +} + +int tdb_append(struct tdb_context *tdb, + struct tdb_data key, struct tdb_data dbuf) +{ + tdb_off_t off, bucket, start, num; + struct tdb_used_record rec; + tdb_len_t old_room = 0, old_dlen; + uint64_t h; + unsigned char *newdata; + struct tdb_data new_dbuf; + int ret; + + h = tdb_hash(tdb, key.dptr, key.dsize); + off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec); + if (unlikely(off == TDB_OFF_ERR)) + return -1; - /* We didn't like the existing one: remove it. */ if (off) { - add_free_record(tdb, off, sizeof(struct tdb_used_record) - + key.dsize + room); - } + old_dlen = rec_data_length(&rec); + old_room = old_dlen + rec_extra_padding(&rec); -write: - off = tdb->header.v.hash_off + end * sizeof(tdb_off_t); - /* FIXME: Encode extra hash bits! */ - if (tdb_write_off(tdb, off, new_off) == -1) - goto fail; + /* Fast path: can append in place. */ + if (rec_extra_padding(&rec) >= dbuf.dsize) { + if (update_rec_hdr(tdb, off, key.dsize, + old_dlen + dbuf.dsize, &rec, h)) + goto fail; - off = new_off + sizeof(struct tdb_used_record); - if (tdb->methods->write(tdb, off, key.dptr, key.dsize) == -1) - goto fail; - off += key.dsize; - if (tdb->methods->write(tdb, off, dbuf.dptr, dbuf.dsize) == -1) - goto fail; + off += sizeof(rec) + key.dsize + old_dlen; + if (tdb->methods->write(tdb, off, dbuf.dptr, + dbuf.dsize) == -1) + goto fail; - /* FIXME: tdb_increment_seqnum(tdb); */ - unlock_lists(tdb, start, end, F_WRLCK); + /* FIXME: tdb_increment_seqnum(tdb); */ + unlock_lists(tdb, start, num, F_WRLCK); + return 0; + } + /* FIXME: Check right record free? */ + + /* Slow path. */ + newdata = malloc(key.dsize + old_dlen + dbuf.dsize); + if (!newdata) { + tdb->ecode = TDB_ERR_OOM; + tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv, + "tdb_append: cannot allocate %llu bytes!\n", + (long long)key.dsize + old_dlen + dbuf.dsize); + goto fail; + } + if (tdb->methods->read(tdb, off + sizeof(rec) + key.dsize, + newdata, old_dlen) != 0) { + free(newdata); + goto fail; + } + memcpy(newdata + old_dlen, dbuf.dptr, dbuf.dsize); + new_dbuf.dptr = newdata; + new_dbuf.dsize = old_dlen + dbuf.dsize; + } else { + newdata = NULL; + new_dbuf = dbuf; + } + + /* If they're using tdb_append(), it implies they're growing record. 
*/ + ret = replace_data(tdb, h, key, new_dbuf, bucket, off, old_room, + rec_zone_bits(&rec), true); + unlock_lists(tdb, start, num, F_WRLCK); + free(newdata); - /* By simple trial and error, this roughly approximates a 60% - * full measure. */ - if (unlikely(end - start > 4 * tdb->header.v.hash_bits - 32)) + /* FIXME: by simple simulation, this approximated 60% full. + * Check in real case! */ + if (unlikely(num > 4 * tdb->header.v.hash_bits - 30)) enlarge_hash(tdb); - return 0; + return ret; fail: - unlock_lists(tdb, start, end, F_WRLCK); + unlock_lists(tdb, start, num, F_WRLCK); return -1; } struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key) { - tdb_off_t off, start, end, room; + tdb_off_t off, start, num, bucket; + struct tdb_used_record rec; uint64_t h; - struct tdb_used_record pad, *r; struct tdb_data ret; h = tdb_hash(tdb, key.dptr, key.dsize); - off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_RDLCK); - if (off == TDB_OFF_ERR) + off = find_and_lock(tdb, key, h, F_RDLCK, &start, &num, &bucket, &rec); + if (unlikely(off == TDB_OFF_ERR)) return tdb_null; if (!off) { - unlock_lists(tdb, start, end, F_RDLCK); - tdb->ecode = TDB_SUCCESS; - return tdb_null; - } - - r = tdb_get(tdb, off, &pad, sizeof(*r)); - if (!r) { - unlock_lists(tdb, start, end, F_RDLCK); - return tdb_null; + tdb->ecode = TDB_ERR_NOEXIST; + ret = tdb_null; + } else { + ret.dsize = rec_data_length(&rec); + ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize, + ret.dsize); } - ret.dsize = rec_data_length(r); - ret.dptr = tdb_alloc_read(tdb, off + sizeof(*r) + key.dsize, - ret.dsize); - unlock_lists(tdb, start, end, F_RDLCK); + unlock_lists(tdb, start, num, F_RDLCK); return ret; } -static int hash_add(struct tdb_context *tdb, uint64_t h, tdb_off_t off) -{ - tdb_off_t i, hoff, len, num; - - i = (h & ((1ULL << tdb->header.v.hash_bits) - 1)); - hoff = tdb->header.v.hash_off + i * sizeof(tdb_off_t); - len = (1ULL << tdb->header.v.hash_bits) - i; - - /* Look for next space. */ - num = tdb_find_zero_off(tdb, hoff, len); - if (unlikely(num == len)) { - hoff = tdb->header.v.hash_off; - len = (1ULL << tdb->header.v.hash_bits); - num = tdb_find_zero_off(tdb, hoff, len); - if (i == len) - return -1; - } - /* FIXME: Encode extra hash bits! */ - return tdb_write_off(tdb, hoff + num * sizeof(tdb_off_t), off); -} - -static int unlink_used_record(struct tdb_context *tdb, tdb_off_t chain, - uint64_t *extra_locks) +int tdb_delete(struct tdb_context *tdb, struct tdb_data key) { - tdb_off_t num, len, i, hoff; - - /* FIXME: Maybe lock more in search? Maybe don't lock if scan - * finds none? */ -again: - len = (1ULL << tdb->header.v.hash_bits) - (chain + 1); - hoff = tdb->header.v.hash_off + (chain + 1) * sizeof(tdb_off_t); - num = tdb_find_zero_off(tdb, hoff, len); - - /* We want to lock the zero entry, too. In the wrap case, - * this locks one extra. That's harmless. */ - num++; - - for (i = chain + 1; i < chain + 1 + num; i++) { - if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_WAIT) == -1) { - if (i != chain + 1) - unlock_lists(tdb, chain + 1, i-1, F_WRLCK); - return -1; - } - } + tdb_off_t i, bucket, off, start, num; + struct tdb_used_record rec; + uint64_t h; - /* The wrap case: we need those locks out of order! */ - if (unlikely(num == len + 1)) { - *extra_locks = tdb_find_zero_off(tdb, tdb->header.v.hash_off, - 1ULL << tdb->header.v.hash_bits); - (*extra_locks)++; - for (i = 0; i < *extra_locks; i++) { - if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_NOWAIT)) { - /* Failed. 
Caller must lock in order. */ - if (i) - unlock_lists(tdb, 0, i-1, F_WRLCK); - unlock_lists(tdb, chain + 1, chain + num, - F_WRLCK); - return 1; - } - } - num += *extra_locks; - } + h = tdb_hash(tdb, key.dptr, key.dsize); + start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT); + if (unlikely(start == TDB_OFF_ERR)) + return -1; - /* Now we have the locks, be certain that offset is still 0! */ - hoff = tdb->header.v.hash_off - + (((chain + num) * sizeof(tdb_off_t)) - & ((1ULL << tdb->header.v.hash_bits) - 1)); + /* FIXME: Fastpath: if next is zero, we can delete without lock, + * since this lock protects us. */ + off = find_and_lock_slow(tdb, key, h, F_WRLCK, + &start, &num, &bucket, &rec); + if (unlikely(off == TDB_OFF_ERR)) + return -1; - if (unlikely(tdb_read_off(tdb, hoff) != 0)) { - unlock_lists(tdb, chain + 1, chain + num, F_WRLCK); - goto again; + if (!off) { + /* FIXME: We could optimize not found case if it mattered, by + * reading offset after first lock: if it's zero, goto here. */ + unlock_lists(tdb, start, num, F_WRLCK); + tdb->ecode = TDB_ERR_NOEXIST; + return -1; } + /* Since we found the entry, we must have locked it and a zero. */ + assert(num >= 2); - /* OK, all locked. Unlink first one. */ - hoff = tdb->header.v.hash_off + chain * sizeof(tdb_off_t); - if (tdb_write_off(tdb, hoff, 0) == -1) + /* This actually unlinks it. */ + if (tdb_write_off(tdb, hash_off(tdb, bucket), 0) == -1) goto unlock_err; - /* Rehash the rest. */ - for (i = 1; i < num; i++) { - tdb_off_t off; - uint64_t h; + /* Rehash anything following. */ + for (i = bucket+1; i != bucket + num - 1; i++) { + tdb_off_t hoff, off2; + uint64_t h2; - hoff = tdb->header.v.hash_off - + (((chain + i) * sizeof(tdb_off_t)) - & ((1ULL << tdb->header.v.hash_bits) - 1)); - off = tdb_read_off(tdb, hoff); - if (unlikely(off == TDB_OFF_ERR)) + hoff = hash_off(tdb, i); + off2 = tdb_read_off(tdb, hoff); + if (unlikely(off2 == TDB_OFF_ERR)) goto unlock_err; + /* This can happen if we raced. */ + if (unlikely(off2 == 0)) + break; + /* Maybe use a bit to indicate it is in ideal place? */ - h = hash_record(tdb, off); + h2 = hash_record(tdb, off2); /* Is it happy where it is? */ - if ((h & ((1ULL << tdb->header.v.hash_bits)-1)) == (chain + i)) + if (hash_off(tdb, h2) == hoff) continue; /* Remove it. */ @@ -791,57 +1014,26 @@ again: goto unlock_err; /* Rehash it. */ - if (hash_add(tdb, h, off) == -1) + if (hash_add(tdb, h2, off2) == -1) goto unlock_err; } - unlock_lists(tdb, chain + 1, chain + num, F_WRLCK); + + /* Free the deleted entry. */ + if (add_free_record(tdb, rec_zone_bits(&rec), off, + sizeof(struct tdb_used_record) + + rec_key_length(&rec) + + rec_data_length(&rec) + + rec_extra_padding(&rec)) != 0) + goto unlock_err; + + unlock_lists(tdb, start, num, F_WRLCK); return 0; unlock_err: - unlock_lists(tdb, chain + 1, chain + num, F_WRLCK); + unlock_lists(tdb, start, num, F_WRLCK); return -1; } -int tdb_delete(struct tdb_context *tdb, struct tdb_data key) -{ - tdb_off_t off, start, end, room, extra_locks = 0; - uint64_t h; - int ret; - - h = tdb_hash(tdb, key.dptr, key.dsize); - off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_WRLCK); - if (off == TDB_OFF_ERR) - return -1; - - if (off == 0) { - unlock_lists(tdb, start, end, F_WRLCK); - tdb->ecode = TDB_ERR_NOEXIST; - return -1; - } - - ret = unlink_used_record(tdb, end, &extra_locks); - if (unlikely(ret == 1)) { - unsigned int i; - - unlock_lists(tdb, start, end, F_WRLCK); - - /* We need extra locks at the start. 
*/ - for (i = 0; i < extra_locks; i++) { - if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_WAIT)) { - if (i) - unlock_lists(tdb, 0, i-1, F_WRLCK); - return -1; - } - } - /* Try again now we're holding more locks. */ - ret = tdb_delete(tdb, key); - unlock_lists(tdb, 0, i, F_WRLCK); - return ret; - } - unlock_lists(tdb, start, end, F_WRLCK); - return ret; -} - int tdb_close(struct tdb_context *tdb) { struct tdb_context **i;
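
The trickiest part of this change is the switch to a fully open-addressed hash: hash_add() probes forward from the home bucket to the first zero entry, and the rehash loop in tdb_delete() re-inserts the run of entries that follows a deleted bucket so later lookups never stop early at a false hole. The standalone sketch below is illustrative only and is not part of the patch: every name, size and the toy hash are invented, and it leaves out the per-bucket locking, the extra-hash-bit encoding and the enlarge_hash() growth path that the real code handles.

/*
 * Toy model of the open-addressed hash used above (not tdb code).
 * Insert walks forward from the home bucket to the first empty slot;
 * delete clears its bucket and re-inserts the following run so no
 * lookup path is cut short by a freshly created hole.
 */
#include <stdint.h>
#include <stdio.h>

#define HASH_BITS 4
#define NBUCKETS  (1u << HASH_BITS)

struct toy_entry {
	uint64_t hash;		/* stands in for hash_record(tdb, off) */
	int      value;
};

static struct toy_entry *table[NBUCKETS];	/* NULL == empty bucket */

static unsigned home_bucket(uint64_t hash)
{
	return (unsigned)(hash & (NBUCKETS - 1));
}

/* Like hash_add(): probe forward (with wrap) for the first empty bucket. */
static int toy_add(struct toy_entry *e)
{
	unsigned i;

	for (i = 0; i < NBUCKETS; i++) {
		unsigned b = (home_bucket(e->hash) + i) & (NBUCKETS - 1);
		if (!table[b]) {
			table[b] = e;
			return 0;
		}
	}
	return -1;	/* full: the real code grows the table long before this */
}

/* Like the rehash loop in tdb_delete(): clear the bucket, then re-insert
 * everything up to the next empty bucket, skipping entries already home. */
static void toy_delete(unsigned bucket)
{
	unsigned i;

	table[bucket] = NULL;
	for (i = (bucket + 1) & (NBUCKETS - 1);
	     table[i] != NULL;
	     i = (i + 1) & (NBUCKETS - 1)) {
		struct toy_entry *e = table[i];

		if (home_bucket(e->hash) == i)
			continue;	/* already in its ideal bucket */
		table[i] = NULL;
		toy_add(e);		/* may land in the hole we just made */
	}
}

int main(void)
{
	/* Three entries that all hash to bucket 3, forming the run 3,4,5. */
	struct toy_entry a = { 3, 1 }, b = { 3 + NBUCKETS, 2 }, c = { 3, 3 };
	unsigned i;

	toy_add(&a);
	toy_add(&b);
	toy_add(&c);
	toy_delete(3);		/* delete a; b and c shuffle back up */

	for (i = 0; i < NBUCKETS; i++)
		if (table[i])
			printf("bucket %u: value %d (home %u)\n",
			       i, table[i]->value, home_bucket(table[i]->hash));
	return 0;
}

Compiled on its own, the example prints the two surviving entries moved back toward their home bucket, which is the invariant the "Rehash anything following" loop in tdb_delete() preserves.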