X-Git-Url: https://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Ftdb2%2Ffree.c;h=83ad7ff5d60ac81deb55d564f09296b28ee5812a;hp=5284510a07fbdfd3aa5341360c29448e3eb9f183;hb=012f68a0738e2190580f4639321d18ad5f4611c2;hpb=8a462e5a662a6da97060162f0b9220c60ebfdd8c diff --git a/ccan/tdb2/free.c b/ccan/tdb2/free.c index 5284510a..83ad7ff5 100644 --- a/ccan/tdb2/free.c +++ b/ccan/tdb2/free.c @@ -1,7 +1,7 @@ - /* + /* Trivial Database 2: free list/block handling Copyright (C) Rusty Russell 2010 - + This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either @@ -17,54 +17,18 @@ */ #include "private.h" #include +#include #include #include #include static unsigned fls64(uint64_t val) { -#if HAVE_BUILTIN_CLZL - if (val <= ULONG_MAX) { - /* This is significantly faster! */ - return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0; - } else { -#endif - uint64_t r = 64; - - if (!val) - return 0; - if (!(val & 0xffffffff00000000ull)) { - val <<= 32; - r -= 32; - } - if (!(val & 0xffff000000000000ull)) { - val <<= 16; - r -= 16; - } - if (!(val & 0xff00000000000000ull)) { - val <<= 8; - r -= 8; - } - if (!(val & 0xf000000000000000ull)) { - val <<= 4; - r -= 4; - } - if (!(val & 0xc000000000000000ull)) { - val <<= 2; - r -= 2; - } - if (!(val & 0x8000000000000000ull)) { - val <<= 1; - r -= 1; - } - return r; -#if HAVE_BUILTIN_CLZL - } -#endif + return ilog64(val); } /* In which bucket would we find a particular record size? (ignoring header) */ -unsigned int size_to_bucket(unsigned int zone_bits, tdb_len_t data_len) +unsigned int size_to_bucket(tdb_len_t data_len) { unsigned int bucket; @@ -80,113 +44,106 @@ unsigned int size_to_bucket(unsigned int zone_bits, tdb_len_t data_len) bucket = fls64(data_len - TDB_MIN_DATA_LEN) + 2; } - if (unlikely(bucket > BUCKETS_FOR_ZONE(zone_bits))) - bucket = BUCKETS_FOR_ZONE(zone_bits); + if (unlikely(bucket >= TDB_FREE_BUCKETS)) + bucket = TDB_FREE_BUCKETS - 1; return bucket; } -/* Subtract 1-byte tailer and header. Then round up to next power of 2. */ -static unsigned max_zone_bits(struct tdb_context *tdb) +tdb_off_t first_ftable(struct tdb_context *tdb) { - return fls64(tdb->map_size-1-sizeof(struct tdb_header)-1) + 1; + return tdb_read_off(tdb, offsetof(struct tdb_header, free_table)); } -/* Start by using a random zone to spread the load: returns the offset. */ -static uint64_t random_zone(struct tdb_context *tdb) +tdb_off_t next_ftable(struct tdb_context *tdb, tdb_off_t ftable) { - struct free_zone_header zhdr; - tdb_off_t off = sizeof(struct tdb_header); - tdb_len_t half_bits; - uint64_t randbits = 0; - unsigned int i; - - for (i = 0; i < 64; i += fls64(RAND_MAX)) - randbits ^= ((uint64_t)random()) << i; - - /* FIXME: Does this work? Test! */ - half_bits = max_zone_bits(tdb) - 1; - do { - /* Pick left or right side (not outside file) */ - if ((randbits & 1) - && !tdb->methods->oob(tdb, off + (1ULL << half_bits) - + sizeof(zhdr), true)) { - off += 1ULL << half_bits; - } - randbits >>= 1; + return tdb_read_off(tdb, ftable + offsetof(struct tdb_freetable,next)); +} - if (tdb_read_convert(tdb, off, &zhdr, sizeof(zhdr)) == -1) - return TDB_OFF_ERR; +int tdb_ftable_init(struct tdb_context *tdb) +{ + /* Use reservoir sampling algorithm to select a free list at random. 
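+	 * Each free table in the chain gets one random() draw; the table
+	 * with the largest draw seen so far becomes the current one, so a
+	 * table can be chosen in a single pass without knowing in advance
+	 * how many free tables the file contains.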
*/ + unsigned int rnd, max = 0, count = 0; + tdb_off_t off; - if (zhdr.zone_bits == half_bits) - return off; + tdb->ftable_off = off = first_ftable(tdb); + tdb->ftable = 0; - half_bits--; - } while (half_bits >= INITIAL_ZONE_BITS); + while (off) { + if (off == TDB_OFF_ERR) + return -1; - tdb->ecode = TDB_ERR_CORRUPT; - tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv, - "random_zone: zone at %llu smaller than %u bits?", - (long long)off, INITIAL_ZONE_BITS); - return TDB_OFF_ERR; -} + rnd = random(); + if (rnd >= max) { + tdb->ftable_off = off; + tdb->ftable = count; + max = rnd; + } -int tdb_zone_init(struct tdb_context *tdb) -{ - tdb->zone_off = random_zone(tdb); - if (tdb->zone_off == TDB_OFF_ERR) - return -1; - if (tdb_read_convert(tdb, tdb->zone_off, - &tdb->zhdr, sizeof(tdb->zhdr)) == -1) - return -1; + off = next_ftable(tdb, off); + count++; + } return 0; } -/* Where's the header, given a zone size of 1 << zone_bits? */ -static tdb_off_t zone_off(tdb_off_t off, unsigned int zone_bits) -{ - off -= sizeof(struct tdb_header); - return (off & ~((1ULL << zone_bits) - 1)) + sizeof(struct tdb_header); -} - /* Offset of a given bucket. */ -/* FIXME: bucket can be "unsigned" everywhere, or even uint8/16. */ -tdb_off_t bucket_off(tdb_off_t zone_off, tdb_off_t bucket) +tdb_off_t bucket_off(tdb_off_t ftable_off, unsigned bucket) { - return zone_off - + sizeof(struct free_zone_header) + return ftable_off + offsetof(struct tdb_freetable, buckets) + bucket * sizeof(tdb_off_t); } /* Returns free_buckets + 1, or list number to search. */ -static tdb_off_t find_free_head(struct tdb_context *tdb, tdb_off_t bucket) +static tdb_off_t find_free_head(struct tdb_context *tdb, + tdb_off_t ftable_off, + tdb_off_t bucket) { /* Speculatively search for a non-zero bucket. */ - return tdb_find_nonzero_off(tdb, bucket_off(tdb->zone_off, 0), - bucket, - BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits) + 1); + return tdb_find_nonzero_off(tdb, bucket_off(ftable_off, 0), + bucket, TDB_FREE_BUCKETS); } /* Remove from free bucket. */ static int remove_from_list(struct tdb_context *tdb, - tdb_off_t b_off, struct tdb_free_record *r) + tdb_off_t b_off, tdb_off_t r_off, + const struct tdb_free_record *r) { tdb_off_t off; /* Front of list? 
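	 * If the record is the head of its bucket list there is no previous
	 * record, so the pointer to rewrite is the bucket's head pointer at
	 * b_off; otherwise it is the next field of the previous record.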
*/ - if (r->prev == 0) { + if (frec_prev(r) == 0) { off = b_off; } else { - off = r->prev + offsetof(struct tdb_free_record, next); + off = frec_prev(r) + offsetof(struct tdb_free_record, next); + } + +#ifdef CCAN_TDB2_DEBUG + if (tdb_read_off(tdb, off) != r_off) { + tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR, + "remove_from_list: %llu bad prev in list %llu", + (long long)r_off, (long long)b_off); + return -1; } +#endif + /* r->prev->next = r->next */ if (tdb_write_off(tdb, off, r->next)) { return -1; } if (r->next != 0) { - off = r->next + offsetof(struct tdb_free_record, prev); + off = r->next + offsetof(struct tdb_free_record,magic_and_prev); /* r->next->prev = r->prev */ - if (tdb_write_off(tdb, off, r->prev)) { + +#ifdef CCAN_TDB2_DEBUG + if (tdb_read_off(tdb, off) & TDB_OFF_MASK != r_off) { + tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR, + "remove_from_list: %llu bad list %llu", + (long long)r_off, (long long)b_off); + return -1; + } +#endif + + if (tdb_write_off(tdb, off, r->magic_and_prev)) { return -1; } } @@ -197,150 +154,212 @@ static int remove_from_list(struct tdb_context *tdb, static int enqueue_in_free(struct tdb_context *tdb, tdb_off_t b_off, tdb_off_t off, - struct tdb_free_record *new) + tdb_len_t len) { - new->prev = 0; + struct tdb_free_record new; + uint64_t magic = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL)); + + /* We only need to set ftable_and_len; rest is set in enqueue_in_free */ + new.ftable_and_len = ((uint64_t)tdb->ftable << (64 - TDB_OFF_UPPER_STEAL)) + | len; + /* prev = 0. */ + new.magic_and_prev = magic; + /* new->next = head. */ - new->next = tdb_read_off(tdb, b_off); - if (new->next == TDB_OFF_ERR) + new.next = tdb_read_off(tdb, b_off); + if (new.next == TDB_OFF_ERR) return -1; - if (new->next) { + if (new.next) { +#ifdef CCAN_TDB2_DEBUG + if (tdb_read_off(tdb, + new.next + offsetof(struct tdb_free_record, + magic_and_prev)) + != magic) { + tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR, + "enqueue_in_free: %llu bad head" + " prev %llu", + (long long)new.next, (long long)b_off); + return -1; + } +#endif /* next->prev = new. */ - if (tdb_write_off(tdb, new->next - + offsetof(struct tdb_free_record, prev), - off) != 0) + if (tdb_write_off(tdb, new.next + + offsetof(struct tdb_free_record, + magic_and_prev), + off | magic) != 0) return -1; } /* head = new */ if (tdb_write_off(tdb, b_off, off) != 0) return -1; - return tdb_write_convert(tdb, off, new, sizeof(*new)); + return tdb_write_convert(tdb, off, &new, sizeof(new)); } /* List need not be locked. 
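  * (add_free_record() takes and releases the per-bucket free-list lock
  * itself; the bucket is chosen from the current free table according
  * to the freed record's data length.)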
*/ int add_free_record(struct tdb_context *tdb, - unsigned int zone_bits, tdb_off_t off, tdb_len_t len_with_header) { - struct tdb_free_record new; tdb_off_t b_off; + tdb_len_t len; int ret; + enum TDB_ERROR ecode; - assert(len_with_header >= sizeof(new)); - assert(zone_bits < (1 << 6)); + assert(len_with_header >= sizeof(struct tdb_free_record)); - new.magic_and_meta = TDB_FREE_MAGIC | zone_bits; - new.data_len = len_with_header - sizeof(struct tdb_used_record); + len = len_with_header - sizeof(struct tdb_used_record); - b_off = bucket_off(zone_off(off, zone_bits), - size_to_bucket(zone_bits, new.data_len)); - if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) != 0) + b_off = bucket_off(tdb->ftable_off, size_to_bucket(len)); + ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; return -1; + } - ret = enqueue_in_free(tdb, b_off, off, &new); + ret = enqueue_in_free(tdb, b_off, off, len); tdb_unlock_free_bucket(tdb, b_off); return ret; } +static size_t adjust_size(size_t keylen, size_t datalen) +{ + size_t size = keylen + datalen; + + if (size < TDB_MIN_DATA_LEN) + size = TDB_MIN_DATA_LEN; + + /* Round to next uint64_t boundary. */ + return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL); +} + /* If we have enough left over to be useful, split that off. */ -static int to_used_record(struct tdb_context *tdb, - unsigned int zone_bits, - tdb_off_t off, - tdb_len_t needed, - tdb_len_t total_len, - tdb_len_t *actual) +static size_t record_leftover(size_t keylen, size_t datalen, + bool want_extra, size_t total_len) { - struct tdb_used_record used; - tdb_len_t leftover; + ssize_t leftover; - leftover = total_len - needed; - if (leftover < sizeof(struct tdb_free_record)) - leftover = 0; + if (want_extra) + datalen += datalen / 2; + leftover = total_len - adjust_size(keylen, datalen); - *actual = total_len - leftover; + if (leftover < (ssize_t)sizeof(struct tdb_free_record)) + return 0; - if (leftover) { - if (add_free_record(tdb, zone_bits, - off + sizeof(used) + *actual, - total_len - needed)) - return -1; - } - return 0; + return leftover; +} + +static tdb_off_t ftable_offset(struct tdb_context *tdb, unsigned int ftable) +{ + tdb_off_t off; + unsigned int i; + + if (likely(tdb->ftable == ftable)) + return tdb->ftable_off; + + off = first_ftable(tdb); + for (i = 0; i < ftable; i++) + off = next_ftable(tdb, off); + return off; } /* Note: we unlock the current bucket if we coalesce or fail. */ static int coalesce(struct tdb_context *tdb, - tdb_off_t zone_off, unsigned zone_bits, tdb_off_t off, tdb_off_t b_off, tdb_len_t data_len) { - struct tdb_free_record pad, *r; - tdb_off_t end = off + sizeof(struct tdb_used_record) + data_len; + tdb_off_t end; + struct tdb_free_record rec; + + add_stat(tdb, alloc_coalesce_tried, 1); + end = off + sizeof(struct tdb_used_record) + data_len; - while (end < (zone_off + (1ULL << zone_bits))) { + while (end < tdb->map_size) { + const struct tdb_free_record *r; tdb_off_t nb_off; + unsigned ftable, bucket; - /* FIXME: do tdb_get here and below really win? 
*/ - r = tdb_get(tdb, end, &pad, sizeof(pad)); + r = tdb_access_read(tdb, end, sizeof(*r), true); if (!r) goto err; - if (frec_magic(r) != TDB_FREE_MAGIC) + if (frec_magic(r) != TDB_FREE_MAGIC + || frec_ftable(r) == TDB_FTABLE_NONE) { + tdb_access_release(tdb, r); break; + } - nb_off = bucket_off(zone_off, - size_to_bucket(zone_bits, r->data_len)); + ftable = frec_ftable(r); + bucket = size_to_bucket(frec_len(r)); + nb_off = bucket_off(ftable_offset(tdb, ftable), bucket); + tdb_access_release(tdb, r); /* We may be violating lock order here, so best effort. */ - if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT) == -1) + if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT) + != TDB_SUCCESS) { + add_stat(tdb, alloc_coalesce_lockfail, 1); break; + } /* Now we have lock, re-check. */ - r = tdb_get(tdb, end, &pad, sizeof(pad)); - if (!r) { + if (tdb_read_convert(tdb, end, &rec, sizeof(rec))) { tdb_unlock_free_bucket(tdb, nb_off); goto err; } - if (unlikely(frec_magic(r) != TDB_FREE_MAGIC)) { + if (unlikely(frec_magic(&rec) != TDB_FREE_MAGIC)) { + add_stat(tdb, alloc_coalesce_race, 1); + tdb_unlock_free_bucket(tdb, nb_off); + break; + } + + if (unlikely(frec_ftable(&rec) != ftable) + || unlikely(size_to_bucket(frec_len(&rec)) != bucket)) { + add_stat(tdb, alloc_coalesce_race, 1); tdb_unlock_free_bucket(tdb, nb_off); break; } - if (remove_from_list(tdb, nb_off, r) == -1) { + if (remove_from_list(tdb, nb_off, end, &rec) == -1) { tdb_unlock_free_bucket(tdb, nb_off); goto err; } - end += sizeof(struct tdb_used_record) + r->data_len; + end += sizeof(struct tdb_used_record) + frec_len(&rec); tdb_unlock_free_bucket(tdb, nb_off); + add_stat(tdb, alloc_coalesce_num_merged, 1); } /* Didn't find any adjacent free? */ if (end == off + sizeof(struct tdb_used_record) + data_len) return 0; - /* OK, expand record */ - r = tdb_get(tdb, off, &pad, sizeof(pad)); - if (!r) + /* OK, expand initial record */ + if (tdb_read_convert(tdb, off, &rec, sizeof(rec))) goto err; - if (r->data_len != data_len) { - tdb->ecode = TDB_ERR_CORRUPT; - tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv, - "coalesce: expected data len %llu not %llu\n", - (long long)data_len, (long long)r->data_len); + if (frec_len(&rec) != data_len) { + tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR, + "coalesce: expected data len %zu not %zu", + (size_t)data_len, (size_t)frec_len(&rec)); goto err; } - if (remove_from_list(tdb, b_off, r) == -1) + if (remove_from_list(tdb, b_off, off, &rec) == -1) goto err; - /* We have to drop this to avoid deadlocks. */ + /* We have to drop this to avoid deadlocks, so make sure record + * doesn't get coalesced by someone else! */ + rec.ftable_and_len = (TDB_FTABLE_NONE << (64 - TDB_OFF_UPPER_STEAL)) + | (end - off - sizeof(struct tdb_used_record)); + if (tdb_write_off(tdb, off + offsetof(struct tdb_free_record, + ftable_and_len), + rec.ftable_and_len) != 0) + goto err; + + add_stat(tdb, alloc_coalesce_succeeded, 1); tdb_unlock_free_bucket(tdb, b_off); - if (add_free_record(tdb, zone_bits, off, end - off) == -1) + if (add_free_record(tdb, off, end - off) == -1) return -1; return 1; @@ -352,31 +371,39 @@ err: /* We need size bytes to put our key and data in. 
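  * lock_and_alloc() below walks a single bucket's list looking for a
  * best fit, relaxing how good a fit it insists on (the multiplier) as
  * it walks, and opportunistically coalesces neighbouring free records
  * along the way.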
*/ static tdb_off_t lock_and_alloc(struct tdb_context *tdb, - tdb_off_t zone_off, - unsigned zone_bits, + tdb_off_t ftable_off, tdb_off_t bucket, - size_t size, - tdb_len_t *actual) + size_t keylen, size_t datalen, + bool want_extra, + unsigned magic, + unsigned hashlow) { tdb_off_t off, b_off,best_off; - struct tdb_free_record pad, best = { 0 }, *r; + struct tdb_free_record best = { 0 }; double multiplier; + size_t size = adjust_size(keylen, datalen); + enum TDB_ERROR ecode; + add_stat(tdb, allocs, 1); again: - b_off = bucket_off(zone_off, bucket); + b_off = bucket_off(ftable_off, bucket); - /* FIXME: Try non-blocking wait first, to measure contention. - * If we're contented, try switching zones, and don't enlarge zone - * next time (we want more zones). */ + /* FIXME: Try non-blocking wait first, to measure contention. */ /* Lock this bucket. */ - if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == -1) { + ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; return TDB_OFF_ERR; } - best.data_len = -1ULL; + best.ftable_and_len = -1ULL; best_off = 0; - /* FIXME: Start with larger multiplier if we're growing. */ - multiplier = 1.0; + + /* Get slack if we're after extra. */ + if (want_extra) + multiplier = 1.5; + else + multiplier = 1.0; /* Walk the list to see if any are large enough, getting less fussy * as we go. */ @@ -385,31 +412,40 @@ again: goto unlock_err; while (off) { - /* FIXME: Does tdb_get win anything here? */ - r = tdb_get(tdb, off, &pad, sizeof(*r)); + const struct tdb_free_record *r; + tdb_len_t len; + tdb_off_t next; + + r = tdb_access_read(tdb, off, sizeof(*r), true); if (!r) goto unlock_err; if (frec_magic(r) != TDB_FREE_MAGIC) { - tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv, - "lock_and_alloc: %llu non-free 0x%llx\n", - (long long)off, (long long)r->magic_and_meta); + tdb_access_release(tdb, r); + tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR, + "lock_and_alloc: %llu non-free 0x%llx", + (long long)off, (long long)r->magic_and_prev); goto unlock_err; } - if (r->data_len >= size && r->data_len < best.data_len) { + if (frec_len(r) >= size && frec_len(r) < frec_len(&best)) { best_off = off; best = *r; } - if (best.data_len < size * multiplier && best_off) - goto use_best; + if (frec_len(&best) < size * multiplier && best_off) { + tdb_access_release(tdb, r); + break; + } multiplier *= 1.01; + next = r->next; + len = frec_len(r); + tdb_access_release(tdb, r); + /* Since we're going slow anyway, try coalescing here. */ - switch (coalesce(tdb, zone_off, zone_bits, off, b_off, - r->data_len)) { + switch (coalesce(tdb, off, b_off, len)) { case -1: /* This has already unlocked on error. */ return -1; @@ -417,21 +453,43 @@ again: /* This has unlocked list, restart. */ goto again; } - off = r->next; + off = next; } /* If we found anything at all, use it. */ if (best_off) { - use_best: + struct tdb_used_record rec; + size_t leftover; + /* We're happy with this size: take it. */ - if (remove_from_list(tdb, b_off, &best) != 0) + if (remove_from_list(tdb, b_off, best_off, &best) != 0) goto unlock_err; - tdb_unlock_free_bucket(tdb, b_off); - if (to_used_record(tdb, zone_bits, best_off, size, - best.data_len, actual)) { - return -1; + leftover = record_leftover(keylen, datalen, want_extra, + frec_len(&best)); + + assert(keylen + datalen + leftover <= frec_len(&best)); + /* We need to mark non-free before we drop lock, otherwise + * coalesce() could try to merge it! 
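+	 * The used-record header is written while the bucket lock is still
+	 * held; any leftover tail is re-queued as its own free record before
+	 * the lock is dropped.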
*/ + if (set_header(tdb, &rec, magic, keylen, datalen, + frec_len(&best) - leftover, hashlow) != 0) + goto unlock_err; + + if (tdb_write_convert(tdb, best_off, &rec, sizeof(rec)) != 0) + goto unlock_err; + + /* Bucket of leftover will be <= current bucket, so nested + * locking is allowed. */ + if (leftover) { + add_stat(tdb, alloc_leftover, 1); + if (add_free_record(tdb, + best_off + sizeof(rec) + + frec_len(&best) - leftover, + leftover)) + best_off = TDB_OFF_ERR; } + tdb_unlock_free_bucket(tdb, b_off); + return best_off; } @@ -443,89 +501,82 @@ unlock_err: return TDB_OFF_ERR; } -static bool next_zone(struct tdb_context *tdb) +/* Get a free block from current free list, or 0 if none. */ +static tdb_off_t get_free(struct tdb_context *tdb, + size_t keylen, size_t datalen, bool want_extra, + unsigned magic, unsigned hashlow) { - tdb_off_t next = tdb->zone_off + (1ULL << tdb->zhdr.zone_bits); - - /* We must have a header. */ - if (tdb->methods->oob(tdb, next + sizeof(tdb->zhdr), true)) - return false; - - tdb->zone_off = next; - return tdb_read_convert(tdb, next, &tdb->zhdr, sizeof(tdb->zhdr)) == 0; -} - -/* Offset returned is within current zone (which it may alter). */ -static tdb_off_t get_free(struct tdb_context *tdb, size_t size, - tdb_len_t *actual) -{ - tdb_off_t start_zone = tdb->zone_off, off; + tdb_off_t off, ftable_off; + unsigned start_b, b, ftable; bool wrapped = false; - /* FIXME: If we don't get a hit in the first bucket we want, - * try changing zones for next time. That should help wear - * zones evenly, so we don't need to search all of them before - * expanding. */ - while (!wrapped || tdb->zone_off != start_zone) { - tdb_off_t b; - - /* Shortcut for really huge allocations... */ - if ((size >> tdb->zhdr.zone_bits) != 0) - continue; + /* If they are growing, add 50% to get to higher bucket. */ + if (want_extra) + start_b = size_to_bucket(adjust_size(keylen, + datalen + datalen / 2)); + else + start_b = size_to_bucket(adjust_size(keylen, datalen)); + ftable_off = tdb->ftable_off; + ftable = tdb->ftable; + while (!wrapped || ftable_off != tdb->ftable_off) { /* Start at exact size bucket, and search up... */ - b = size_to_bucket(tdb->zhdr.zone_bits, size); - for (b = find_free_head(tdb, b); - b <= BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits); - b += find_free_head(tdb, b + 1)) { + for (b = find_free_head(tdb, ftable_off, start_b); + b < TDB_FREE_BUCKETS; + b = find_free_head(tdb, ftable_off, b + 1)) { /* Try getting one from list. */ - off = lock_and_alloc(tdb, tdb->zone_off, - tdb->zhdr.zone_bits, - b, size, actual); + off = lock_and_alloc(tdb, ftable_off, + b, keylen, datalen, want_extra, + magic, hashlow); if (off == TDB_OFF_ERR) return TDB_OFF_ERR; - if (off != 0) + if (off != 0) { + if (b == start_b) + add_stat(tdb, alloc_bucket_exact, 1); + if (b == TDB_FREE_BUCKETS - 1) + add_stat(tdb, alloc_bucket_max, 1); + /* Worked? Stay using this list. */ + tdb->ftable_off = ftable_off; + tdb->ftable = ftable; return off; + } /* Didn't work. Try next bucket. */ } - /* Didn't work, try next zone, if it exists. */ - if (!next_zone(tdb)) { + /* Hmm, try next table. 
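+		 * The chain of free tables is walked in order, wrapping back
+		 * to the first table at the end; the surrounding loop stops
+		 * once the table it started from comes round again.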
*/ + ftable_off = next_ftable(tdb, ftable_off); + ftable++; + + if (ftable_off == 0) { wrapped = true; - tdb->zone_off = sizeof(struct tdb_header); - if (tdb_read_convert(tdb, tdb->zone_off, - &tdb->zhdr, sizeof(tdb->zhdr))) { - return TDB_OFF_ERR; - } + ftable_off = first_ftable(tdb); + ftable = 0; } } + return 0; } int set_header(struct tdb_context *tdb, struct tdb_used_record *rec, - uint64_t keylen, uint64_t datalen, - uint64_t actuallen, uint64_t hash, - unsigned int zone_bits) + unsigned magic, uint64_t keylen, uint64_t datalen, + uint64_t actuallen, unsigned hashlow) { uint64_t keybits = (fls64(keylen) + 1) / 2; /* Use bottom bits of hash, so it's independent of hash table size. */ - rec->magic_and_meta - = zone_bits - | ((hash & ((1 << 5)-1)) << 6) + rec->magic_and_meta = (hashlow & ((1 << 11)-1)) | ((actuallen - (keylen + datalen)) << 11) | (keybits << 43) - | (TDB_MAGIC << 48); + | ((uint64_t)magic << 48); rec->key_and_data_len = (keylen | (datalen << (keybits*2))); /* Encoding can fail on big values. */ if (rec_key_length(rec) != keylen || rec_data_length(rec) != datalen || rec_extra_padding(rec) != actuallen - (keylen + datalen)) { - tdb->ecode = TDB_ERR_IO; - tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv, - "Could not encode k=%llu,d=%llu,a=%llu\n", + tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR, + "Could not encode k=%llu,d=%llu,a=%llu", (long long)keylen, (long long)datalen, (long long)actuallen); return -1; @@ -533,156 +584,78 @@ int set_header(struct tdb_context *tdb, return 0; } -static bool zones_happy(struct tdb_context *tdb) -{ - /* FIXME: look at distribution of zones. */ - return true; -} - -/* Assume we want buckets up to the comfort factor. */ -static tdb_len_t overhead(unsigned int zone_bits) -{ - return sizeof(struct free_zone_header) - + (BUCKETS_FOR_ZONE(zone_bits) + 1) * sizeof(tdb_off_t); -} - -/* Expand the database (by adding a zone). */ +/* Expand the database. */ static int tdb_expand(struct tdb_context *tdb, tdb_len_t size) { uint64_t old_size; - tdb_off_t off; - uint8_t zone_bits; - unsigned int num_buckets; tdb_len_t wanted; - struct free_zone_header zhdr; - bool enlarge_zone; + enum TDB_ERROR ecode; /* We need room for the record header too. */ wanted = sizeof(struct tdb_used_record) + size; + /* Need to hold a hash lock to expand DB: transactions rely on it. */ + if (!(tdb->flags & TDB_NOLOCK) + && !tdb->allrecord_lock.count && !tdb_has_hash_locks(tdb)) { + tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR, + "tdb_expand: must hold lock during expand"); + return -1; + } + + /* always make room for at least 100 more records, and at + least 25% more space. */ + if (size * TDB_EXTENSION_FACTOR > tdb->map_size / 4) + wanted = size * TDB_EXTENSION_FACTOR; + else + wanted = tdb->map_size / 4; + wanted = adjust_size(0, wanted); + /* Only one person can expand file at a time. */ - if (tdb_lock_expand(tdb, F_WRLCK) != 0) + ecode = tdb_lock_expand(tdb, F_WRLCK); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; return -1; + } /* Someone else may have expanded the file, so retry. */ old_size = tdb->map_size; tdb->methods->oob(tdb, tdb->map_size + 1, true); - if (tdb->map_size != old_size) - goto success; - - /* FIXME: Tailer is a bogus optimization, remove it. */ - /* zone bits tailer char is protected by EXPAND lock. */ - if (tdb->methods->read(tdb, old_size - 1, &zone_bits, 1) == -1) - goto fail; - - /* If zones aren't working well, add larger zone if possible. 
*/ - enlarge_zone = !zones_happy(tdb); - - /* New zone can be between zone_bits or larger if we're on the right - * boundary. */ - for (;;) { - /* Does this fit the allocation comfortably? */ - if ((1ULL << zone_bits) >= overhead(zone_bits) + wanted) { - /* Only let enlarge_zone enlarge us once. */ - if (!enlarge_zone) - break; - enlarge_zone = false; - } - if ((old_size - 1 - sizeof(struct tdb_header)) - & (1 << zone_bits)) - break; - zone_bits++; + if (tdb->map_size != old_size) { + tdb_unlock_expand(tdb, F_WRLCK); + return 0; } - zhdr.zone_bits = zone_bits; - num_buckets = BUCKETS_FOR_ZONE(zone_bits); - - /* FIXME: I don't think we need to expand to full zone, do we? */ - if (tdb->methods->expand_file(tdb, 1ULL << zone_bits) == -1) - goto fail; - - /* Write new tailer. */ - if (tdb->methods->write(tdb, tdb->map_size - 1, &zone_bits, 1) == -1) - goto fail; - - /* Write new zone header (just before old tailer). */ - off = old_size - 1; - if (tdb_write_convert(tdb, off, &zhdr, sizeof(zhdr)) == -1) - goto fail; - - /* Now write empty buckets. */ - off += sizeof(zhdr); - if (zero_out(tdb, off, (num_buckets+1) * sizeof(tdb_off_t)) == -1) - goto fail; - off += (num_buckets+1) * sizeof(tdb_off_t); - - /* Now add the rest as our free record. */ - if (add_free_record(tdb, zone_bits, off, tdb->map_size-1-off) == -1) - goto fail; - - /* Try allocating from this zone now. */ - tdb->zone_off = old_size - 1; - tdb->zhdr = zhdr; - -success: - tdb_unlock_expand(tdb, F_WRLCK); - return 0; + ecode = tdb->methods->expand_file(tdb, wanted); + if (ecode != TDB_SUCCESS) { + tdb->ecode = ecode; + tdb_unlock_expand(tdb, F_WRLCK); + return -1; + } -fail: + /* We need to drop this lock before adding free record. */ tdb_unlock_expand(tdb, F_WRLCK); - return -1; -} - -static tdb_len_t adjust_size(size_t keylen, size_t datalen, bool growing) -{ - tdb_len_t size = keylen + datalen; - - if (size < TDB_MIN_DATA_LEN) - size = TDB_MIN_DATA_LEN; - - /* Overallocate if this is coming from an enlarging store. */ - if (growing) - size += datalen / 2; - /* Round to next uint64_t boundary. */ - return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL); + add_stat(tdb, expands, 1); + return add_free_record(tdb, old_size, wanted); } /* This won't fail: it will expand the database if it has to. */ tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen, - uint64_t hash, bool growing) + uint64_t hash, unsigned magic, bool growing) { tdb_off_t off; - tdb_len_t size, actual; - struct tdb_used_record rec; /* We can't hold pointers during this: we could unmap! */ assert(!tdb->direct_access); - size = adjust_size(keylen, datalen, growing); - -again: - off = get_free(tdb, size, &actual); - if (unlikely(off == TDB_OFF_ERR)) - return off; + for (;;) { + off = get_free(tdb, keylen, datalen, growing, magic, hash); + if (likely(off != 0)) + break; - if (unlikely(off == 0)) { - if (tdb_expand(tdb, size) == -1) + if (tdb_expand(tdb, adjust_size(keylen, datalen))) return TDB_OFF_ERR; - goto again; } - /* Some supergiant values can't be encoded. */ - /* FIXME: Check before, and limit actual in get_free. */ - if (set_header(tdb, &rec, keylen, datalen, actual, hash, - tdb->zhdr.zone_bits) != 0) { - add_free_record(tdb, tdb->zhdr.zone_bits, off, - sizeof(rec) + actual); - return TDB_OFF_ERR; - } - - if (tdb_write_convert(tdb, off, &rec, sizeof(rec)) != 0) - return TDB_OFF_ERR; - return off; }
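
As a worked example of the size arithmetic this patch introduces, the
standalone sketch below reuses adjust_size() and record_leftover() as they
appear above, but with assumed, illustrative values for TDB_MIN_DATA_LEN and
for the minimum free-record size (the real definitions live in private.h and
may differ):

/* Standalone sketch of the free.c size arithmetic.
 * TDB_MIN_DATA_LEN and FREE_RECORD_SIZE are assumed values for
 * illustration only, not the ones from ccan/tdb2/private.h. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/types.h>

#define TDB_MIN_DATA_LEN 8				/* assumed */
#define FREE_RECORD_SIZE (3 * sizeof(uint64_t))	/* assumed */

static size_t adjust_size(size_t keylen, size_t datalen)
{
	size_t size = keylen + datalen;

	if (size < TDB_MIN_DATA_LEN)
		size = TDB_MIN_DATA_LEN;

	/* Round to next uint64_t boundary, as in the patch. */
	return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}

static size_t record_leftover(size_t keylen, size_t datalen,
			      bool want_extra, size_t total_len)
{
	ssize_t leftover;

	/* Growing stores over-allocate the data by 50%. */
	if (want_extra)
		datalen += datalen / 2;
	leftover = total_len - adjust_size(keylen, datalen);

	/* A tail too small to hold a free record stays as padding. */
	if (leftover < (ssize_t)FREE_RECORD_SIZE)
		return 0;

	return leftover;
}

int main(void)
{
	/* A 5-byte key plus 3 bytes of data rounds up to 8 bytes... */
	printf("adjust_size(5, 3)  = %zu\n", adjust_size(5, 3));
	/* ...and 5 + 10 rounds up to the next multiple of 8, i.e. 16. */
	printf("adjust_size(5, 10) = %zu\n", adjust_size(5, 10));

	/* Using a 64-byte free block for that 5/10 record leaves 48
	 * spare bytes, enough (with these assumed sizes) to split off
	 * as a new free record. */
	printf("record_leftover(5, 10, false, 64) = %zu\n",
	       record_leftover(5, 10, false, 64));
	return 0;
}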