- /*
+ /*
Trivial Database 2: free list/block handling
Copyright (C) Rusty Russell 2010
-
+
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
return ilog64(val);
}
-static unsigned ffs64(uint64_t val)
-{
-#if HAVE_BUILTIN_FFSLL
- return __builtin_ffsll(val);
-#else
- unsigned r = 0;
-
- if (!val)
- return 0;
-
- if (!(val & 0xffffffff)) {
- val >>= 32;
- r += 32;
- }
- if (!(val & 0xffff)) {
- val >>= 16;
- r += 16;
- }
- if (!(val & 0xff)) {
- val >>= 8;
- r += 8;
- }
- if (!(val & 0xf)) {
- val >>= 4;
- r += 4;
- }
- if (!(val & 0x3)) {
- val >>= 2;
- r += 2;
- }
- if (!(val & 0x1)) {
- val >>= 1;
- r += 1;
- }
- return r;
-#endif
-}
-
/* In which bucket would we find a particular record size? (ignoring header) */
-unsigned int size_to_bucket(unsigned int zone_bits, tdb_len_t data_len)
+unsigned int size_to_bucket(tdb_len_t data_len)
{
unsigned int bucket;
bucket = fls64(data_len - TDB_MIN_DATA_LEN) + 2;
}
- if (unlikely(bucket > BUCKETS_FOR_ZONE(zone_bits)))
- bucket = BUCKETS_FOR_ZONE(zone_bits);
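+	/* Anything too big lands in the last bucket. */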
+ if (unlikely(bucket >= TDB_FREE_BUCKETS))
+ bucket = TDB_FREE_BUCKETS - 1;
return bucket;
}
-/* Binary search for the zone for this offset. */
-static tdb_off_t off_to_zone(struct tdb_context *tdb, tdb_off_t off,
- struct free_zone_header *zhdr)
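+/* The first free table is linked from the tdb header. */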
+tdb_off_t first_ftable(struct tdb_context *tdb)
{
- tdb_off_t start, end;
-
- start = sizeof(struct tdb_header);
- end = start + (1ULL << fls64(tdb->map_size - start));
-
- for (;;) {
- if (tdb_read_convert(tdb, start, zhdr, sizeof(*zhdr)) == -1)
- return TDB_OFF_ERR;
-
- /* Is it inside this zone? */
- if (off < start + (1ULL << zhdr->zone_bits))
- return start;
-
- /* In practice, start + end won't overflow. */
- if (off >= (start + end) / 2)
- start = (start + end) / 2;
- else
- end = (start + end) / 2;
- }
+ return tdb_read_off(tdb, offsetof(struct tdb_header, free_table));
}
-static tdb_off_t last_zone(struct tdb_context *tdb,
- struct free_zone_header *zhdr)
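+/* Free tables form a chain; returns the next offset, or 0 at the end. */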
+tdb_off_t next_ftable(struct tdb_context *tdb, tdb_off_t ftable)
{
- return off_to_zone(tdb, tdb->map_size - 1, zhdr);
+	return tdb_read_off(tdb, ftable + offsetof(struct tdb_freetable, next));
}
-int tdb_zone_init(struct tdb_context *tdb)
+enum TDB_ERROR tdb_ftable_init(struct tdb_context *tdb)
{
- unsigned int i;
- uint64_t randoff = 0;
+	/* Use a reservoir-sampling algorithm to pick a free table at random. */
+ unsigned int rnd, max = 0, count = 0;
+ tdb_off_t off;
- /* We start in a random zone, to spread the load. */
- for (i = 0; i < 64; i += fls64(RAND_MAX))
- randoff ^= ((uint64_t)random()) << i;
- randoff = sizeof(struct tdb_header)
- + (randoff % (tdb->map_size - sizeof(struct tdb_header)));
+ tdb->tdb2.ftable_off = off = first_ftable(tdb);
+ tdb->tdb2.ftable = 0;
- tdb->zone_off = off_to_zone(tdb, randoff, &tdb->zhdr);
- if (tdb->zone_off == TDB_OFF_ERR)
- return -1;
- return 0;
-}
+ while (off) {
+ if (TDB_OFF_IS_ERR(off)) {
+ return TDB_OFF_TO_ERR(off);
+ }
-/* Where's the header, given a zone size of 1 << zone_bits? */
-static tdb_off_t zone_off(tdb_off_t off, unsigned int zone_bits)
-{
- off -= sizeof(struct tdb_header);
- return (off & ~((1ULL << zone_bits) - 1)) + sizeof(struct tdb_header);
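+		/* Each table gets an independent random draw; keeping the
+		 * highest draw picks one (near-)uniformly at random. */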
+ rnd = random();
+ if (rnd >= max) {
+ tdb->tdb2.ftable_off = off;
+ tdb->tdb2.ftable = count;
+ max = rnd;
+ }
+
+ off = next_ftable(tdb, off);
+ count++;
+ }
+ return TDB_SUCCESS;
}
/* Offset of a given bucket. */
-/* FIXME: bucket can be "unsigned" everywhere, or even uint8/16. */
-tdb_off_t bucket_off(tdb_off_t zone_off, tdb_off_t bucket)
+tdb_off_t bucket_off(tdb_off_t ftable_off, unsigned bucket)
{
- return zone_off
- + sizeof(struct free_zone_header)
+ return ftable_off + offsetof(struct tdb_freetable, buckets)
+ bucket * sizeof(tdb_off_t);
}
-/* Returns free_buckets + 1, or list number to search. */
-static tdb_off_t find_free_head(struct tdb_context *tdb, tdb_off_t bucket)
+/* Returns the list number to search, TDB_FREE_BUCKETS if none, or -ve error. */
+static tdb_off_t find_free_head(struct tdb_context *tdb,
+ tdb_off_t ftable_off,
+ tdb_off_t bucket)
{
/* Speculatively search for a non-zero bucket. */
- return tdb_find_nonzero_off(tdb, bucket_off(tdb->zone_off, 0),
- bucket,
- BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits) + 1);
+ return tdb_find_nonzero_off(tdb, bucket_off(ftable_off, 0),
+ bucket, TDB_FREE_BUCKETS);
}
-/* Remove from free bucket. */
-static int remove_from_list(struct tdb_context *tdb,
- tdb_off_t b_off, tdb_off_t r_off,
- struct tdb_free_record *r)
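+/* Debug-only walk of a bucket's free list; aborts if magics or prev links
+ * are inconsistent. */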
+static void check_list(struct tdb_context *tdb, tdb_off_t b_off)
{
- tdb_off_t off;
-
- /* Front of list? */
- if (r->prev == 0) {
- off = b_off;
- } else {
- off = r->prev + offsetof(struct tdb_free_record, next);
- }
-
-#ifdef DEBUG
- if (tdb_read_off(tdb, off) != r_off) {
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "remove_from_list: %llu bad prev in list %llu\n",
- (long long)r_off, (long long)b_off);
- return -1;
+#ifdef CCAN_TDB2_DEBUG
+ tdb_off_t off, prev = 0, first;
+ struct tdb_free_record r;
+
+ first = off = (tdb_read_off(tdb, b_off) & TDB_OFF_MASK);
+ while (off != 0) {
+ tdb_read_convert(tdb, off, &r, sizeof(r));
+ if (frec_magic(&r) != TDB_FREE_MAGIC)
+ abort();
+ if (prev && frec_prev(&r) != prev)
+ abort();
+ prev = off;
+ off = r.next;
}
-#endif
- /* r->prev->next = r->next */
- if (tdb_write_off(tdb, off, r->next)) {
- return -1;
+ if (first) {
+ tdb_read_convert(tdb, first, &r, sizeof(r));
+ if (frec_prev(&r) != prev)
+ abort();
}
-
- if (r->next != 0) {
- off = r->next + offsetof(struct tdb_free_record, prev);
- /* r->next->prev = r->prev */
-
-#ifdef DEBUG
- if (tdb_read_off(tdb, off) != r_off) {
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "remove_from_list: %llu bad list %llu\n",
- (long long)r_off, (long long)b_off);
- return -1;
- }
#endif
-
- if (tdb_write_off(tdb, off, r->prev)) {
- return -1;
- }
- }
- return 0;
}
-/* Enqueue in this free bucket. */
-static int enqueue_in_free(struct tdb_context *tdb,
- tdb_off_t b_off,
- tdb_off_t off,
- struct tdb_free_record *new)
+/* Remove from free bucket. */
+static enum TDB_ERROR remove_from_list(struct tdb_context *tdb,
+ tdb_off_t b_off, tdb_off_t r_off,
+ const struct tdb_free_record *r)
{
- new->prev = 0;
- /* new->next = head. */
- new->next = tdb_read_off(tdb, b_off);
- if (new->next == TDB_OFF_ERR)
- return -1;
-
- if (new->next) {
-#ifdef DEBUG
- if (tdb_read_off(tdb,
- new->next
- + offsetof(struct tdb_free_record, prev))
- != 0) {
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "enqueue_in_free: %llu bad head prev %llu\n",
- (long long)new->next, (long long)b_off);
- return -1;
+ tdb_off_t off, prev_next, head;
+ enum TDB_ERROR ecode;
+
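+	/* List shape (see enqueue_in_free): the bucket word points at the
+	 * head; next pointers run head -> ... -> tail -> 0, while prev
+	 * pointers (tagged with TDB_FREE_MAGIC in the top bits) are circular,
+	 * so the head's prev is the tail. */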
+ /* Is this only element in list? Zero out bucket, and we're done. */
+ if (frec_prev(r) == r_off)
+ return tdb_write_off(tdb, b_off, 0);
+
+ /* off = &r->prev->next */
+ off = frec_prev(r) + offsetof(struct tdb_free_record, next);
+
+ /* Get prev->next */
+ prev_next = tdb_read_off(tdb, off);
+ if (TDB_OFF_IS_ERR(prev_next))
+ return TDB_OFF_TO_ERR(prev_next);
+
+ /* If prev->next == 0, we were head: update bucket to point to next. */
+ if (prev_next == 0) {
+ /* We must preserve upper bits. */
+ head = tdb_read_off(tdb, b_off);
+ if (TDB_OFF_IS_ERR(head))
+ return TDB_OFF_TO_ERR(head);
+
+ if ((head & TDB_OFF_MASK) != r_off) {
+ return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
+ "remove_from_list:"
+ " %llu head %llu on list %llu",
+ (long long)r_off,
+ (long long)head,
+ (long long)b_off);
}
-#endif
- /* next->prev = new. */
- if (tdb_write_off(tdb, new->next
- + offsetof(struct tdb_free_record, prev),
- off) != 0)
- return -1;
+ head = ((head & ~TDB_OFF_MASK) | r->next);
+ ecode = tdb_write_off(tdb, b_off, head);
+ if (ecode != TDB_SUCCESS)
+ return ecode;
+ } else {
+ /* r->prev->next = r->next */
+ ecode = tdb_write_off(tdb, off, r->next);
+ if (ecode != TDB_SUCCESS)
+ return ecode;
+ }
+
+ /* If we were the tail, off = &head->prev. */
+ if (r->next == 0) {
+ head = tdb_read_off(tdb, b_off);
+ if (TDB_OFF_IS_ERR(head))
+ return TDB_OFF_TO_ERR(head);
+ head &= TDB_OFF_MASK;
+ off = head + offsetof(struct tdb_free_record, magic_and_prev);
+ } else {
+ /* off = &r->next->prev */
+ off = r->next + offsetof(struct tdb_free_record,
+ magic_and_prev);
}
- /* head = new */
- if (tdb_write_off(tdb, b_off, off) != 0)
- return -1;
- return tdb_write_convert(tdb, off, new, sizeof(*new));
+#ifdef CCAN_TDB2_DEBUG
+ /* *off == r */
+ if ((tdb_read_off(tdb, off) & TDB_OFF_MASK) != r_off) {
+ return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
+ "remove_from_list:"
+ " %llu bad prev in list %llu",
+ (long long)r_off, (long long)b_off);
+ }
+#endif
+ /* r->next->prev = r->prev */
+ return tdb_write_off(tdb, off, r->magic_and_prev);
}
-/* List need not be locked. */
-int add_free_record(struct tdb_context *tdb,
- unsigned int zone_bits,
- tdb_off_t off, tdb_len_t len_with_header)
+/* Enqueue in this free bucket: sets *coalesce once we've added the 128th
+ * entry to it (i.e. the bucket's enqueue counter wrapped). */
+static enum TDB_ERROR enqueue_in_free(struct tdb_context *tdb,
+ tdb_off_t b_off,
+ tdb_off_t off,
+ tdb_len_t len,
+ bool *coalesce)
{
struct tdb_free_record new;
- tdb_off_t b_off;
- int ret;
+ enum TDB_ERROR ecode;
+ tdb_off_t prev, head;
+ uint64_t magic = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL));
- assert(len_with_header >= sizeof(new));
- assert(zone_bits < 64);
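+	/* The bucket head word holds the first record's offset in its low
+	 * bits and the enqueue counter above them. */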
+ head = tdb_read_off(tdb, b_off);
+ if (TDB_OFF_IS_ERR(head))
+ return TDB_OFF_TO_ERR(head);
- new.magic_and_meta = TDB_FREE_MAGIC | zone_bits;
- new.data_len = len_with_header - sizeof(struct tdb_used_record);
+	/* Our free-table index lives in the top (stolen) bits of
+	 * ftable_and_len; the usable length sits below it. */
+ new.ftable_and_len = ((uint64_t)tdb->tdb2.ftable << (64 - TDB_OFF_UPPER_STEAL))
+ | len;
- b_off = bucket_off(zone_off(off, zone_bits),
- size_to_bucket(zone_bits, new.data_len));
- if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) != 0)
- return -1;
+ /* new->next = head. */
+ new.next = (head & TDB_OFF_MASK);
- ret = enqueue_in_free(tdb, b_off, off, &new);
- tdb_unlock_free_bucket(tdb, b_off);
- return ret;
-}
+ /* First element? Prev points to ourselves. */
+ if (!new.next) {
+ new.magic_and_prev = (magic | off);
+ } else {
+ /* new->prev = next->prev */
+ prev = tdb_read_off(tdb,
+ new.next + offsetof(struct tdb_free_record,
+ magic_and_prev));
+ new.magic_and_prev = prev;
+ if (frec_magic(&new) != TDB_FREE_MAGIC) {
+ return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
+ "enqueue_in_free: %llu bad head"
+ " prev %llu",
+ (long long)new.next,
+ (long long)prev);
+ }
+ /* next->prev = new. */
+ ecode = tdb_write_off(tdb, new.next
+ + offsetof(struct tdb_free_record,
+ magic_and_prev),
+ off | magic);
+ if (ecode != TDB_SUCCESS) {
+ return ecode;
+ }
-static size_t adjust_size(size_t keylen, size_t datalen)
-{
- size_t size = keylen + datalen;
+#ifdef CCAN_TDB2_DEBUG
+ prev = tdb_read_off(tdb, frec_prev(&new)
+ + offsetof(struct tdb_free_record, next));
+ if (prev != 0) {
+ return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
+ "enqueue_in_free:"
+ " %llu bad tail next ptr %llu",
+ (long long)frec_prev(&new)
+ + offsetof(struct tdb_free_record,
+ next),
+ (long long)prev);
+ }
+#endif
+ }
- if (size < TDB_MIN_DATA_LEN)
- size = TDB_MIN_DATA_LEN;
+ /* Update enqueue count, but don't set high bit: see TDB_OFF_IS_ERR */
+ if (*coalesce)
+ head += (1ULL << (64 - TDB_OFF_UPPER_STEAL));
+ head &= ~(TDB_OFF_MASK | (1ULL << 63));
+ head |= off;
- /* Round to next uint64_t boundary. */
- return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
+ ecode = tdb_write_off(tdb, b_off, head);
+ if (ecode != TDB_SUCCESS) {
+ return ecode;
+ }
+
+ /* It's time to coalesce if counter wrapped. */
+ if (*coalesce)
+ *coalesce = ((head & ~TDB_OFF_MASK) == 0);
+
+ return tdb_write_convert(tdb, off, &new, sizeof(new));
}
-/* If we have enough left over to be useful, split that off. */
-static size_t record_leftover(size_t keylen, size_t datalen,
- bool want_extra, size_t total_len)
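+/* Map a free-table index to its offset, using the cached one if we can. */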
+static tdb_off_t ftable_offset(struct tdb_context *tdb, unsigned int ftable)
{
- ssize_t leftover;
-
- if (want_extra)
- datalen += datalen / 2;
- leftover = total_len - adjust_size(keylen, datalen);
+ tdb_off_t off;
+ unsigned int i;
- if (leftover < (ssize_t)sizeof(struct tdb_free_record))
- return 0;
+ if (likely(tdb->tdb2.ftable == ftable))
+ return tdb->tdb2.ftable_off;
- return leftover;
+ off = first_ftable(tdb);
+ for (i = 0; i < ftable; i++) {
+ if (TDB_OFF_IS_ERR(off)) {
+ break;
+ }
+ off = next_ftable(tdb, off);
+ }
+ return off;
}
-/* Note: we unlock the current bucket if we coalesce or fail. */
-static int coalesce(struct tdb_context *tdb,
- tdb_off_t zone_off, unsigned zone_bits,
- tdb_off_t off, tdb_off_t b_off, tdb_len_t data_len)
+/* Note: we unlock the current bucket on failure (-ve), or when we coalesce
+ * (+ve) and need to blatt the *protect record (which is then set to an
+ * error). */
+static tdb_len_t coalesce(struct tdb_context *tdb,
+ tdb_off_t off, tdb_off_t b_off,
+ tdb_len_t data_len,
+ tdb_off_t *protect)
{
- struct tdb_free_record pad, *r;
- tdb_off_t zone_end, end;
+ tdb_off_t end;
+ struct tdb_free_record rec;
+ enum TDB_ERROR ecode;
+ tdb->stats.alloc_coalesce_tried++;
end = off + sizeof(struct tdb_used_record) + data_len;
- zone_end = zone_off + (1ULL << zone_bits);
- if (tdb->methods->oob(tdb, zone_end, true))
- zone_end = tdb->map_size;
-
- while (end < zone_end) {
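+	/* Walk forward, absorbing any adjacent free records into this one. */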
+ while (end < tdb->file->map_size) {
+ const struct tdb_free_record *r;
tdb_off_t nb_off;
+ unsigned ftable, bucket;
- /* FIXME: do tdb_get here and below really win? */
- r = tdb_get(tdb, end, &pad, sizeof(pad));
- if (!r)
+ r = tdb_access_read(tdb, end, sizeof(*r), true);
+ if (TDB_PTR_IS_ERR(r)) {
+ ecode = TDB_PTR_ERR(r);
goto err;
+ }
- if (frec_magic(r) != TDB_FREE_MAGIC)
+ if (frec_magic(r) != TDB_FREE_MAGIC
+ || frec_ftable(r) == TDB_FTABLE_NONE) {
+ tdb_access_release(tdb, r);
break;
+ }
- nb_off = bucket_off(zone_off,
- size_to_bucket(zone_bits, r->data_len));
+ ftable = frec_ftable(r);
+ bucket = size_to_bucket(frec_len(r));
+ nb_off = ftable_offset(tdb, ftable);
+ if (TDB_OFF_IS_ERR(nb_off)) {
+ tdb_access_release(tdb, r);
+ ecode = TDB_OFF_TO_ERR(nb_off);
+ goto err;
+ }
+ nb_off = bucket_off(nb_off, bucket);
+ tdb_access_release(tdb, r);
/* We may be violating lock order here, so best effort. */
- if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT) == -1)
+ if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT)
+ != TDB_SUCCESS) {
+ tdb->stats.alloc_coalesce_lockfail++;
break;
+ }
/* Now we have lock, re-check. */
- r = tdb_get(tdb, end, &pad, sizeof(pad));
- if (!r) {
+ ecode = tdb_read_convert(tdb, end, &rec, sizeof(rec));
+ if (ecode != TDB_SUCCESS) {
tdb_unlock_free_bucket(tdb, nb_off);
goto err;
}
- if (unlikely(frec_magic(r) != TDB_FREE_MAGIC)) {
+ if (unlikely(frec_magic(&rec) != TDB_FREE_MAGIC)) {
+ tdb->stats.alloc_coalesce_race++;
tdb_unlock_free_bucket(tdb, nb_off);
break;
}
- if (unlikely(bucket_off(zone_off,
- size_to_bucket(zone_bits, r->data_len))
- != nb_off)) {
+ if (unlikely(frec_ftable(&rec) != ftable)
+ || unlikely(size_to_bucket(frec_len(&rec)) != bucket)) {
+ tdb->stats.alloc_coalesce_race++;
tdb_unlock_free_bucket(tdb, nb_off);
break;
}
- if (remove_from_list(tdb, nb_off, end, r) == -1) {
+ /* Did we just mess up a record you were hoping to use? */
+ if (end == *protect) {
+ tdb->stats.alloc_coalesce_iterate_clash++;
+ *protect = TDB_ERR_TO_OFF(TDB_ERR_NOEXIST);
+ }
+
+ ecode = remove_from_list(tdb, nb_off, end, &rec);
+ check_list(tdb, nb_off);
+ if (ecode != TDB_SUCCESS) {
tdb_unlock_free_bucket(tdb, nb_off);
goto err;
}
- end += sizeof(struct tdb_used_record) + r->data_len;
+ end += sizeof(struct tdb_used_record) + frec_len(&rec);
tdb_unlock_free_bucket(tdb, nb_off);
+ tdb->stats.alloc_coalesce_num_merged++;
}
/* Didn't find any adjacent free? */
if (end == off + sizeof(struct tdb_used_record) + data_len)
return 0;
- /* OK, expand record */
- r = tdb_get(tdb, off, &pad, sizeof(pad));
- if (!r)
- goto err;
+ /* Before we expand, check this isn't one you wanted protected? */
+ if (off == *protect) {
+ *protect = TDB_ERR_TO_OFF(TDB_ERR_EXISTS);
+ tdb->stats.alloc_coalesce_iterate_clash++;
+ }
- if (r->data_len != data_len) {
- tdb->ecode = TDB_ERR_CORRUPT;
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "coalesce: expected data len %llu not %llu\n",
- (long long)data_len, (long long)r->data_len);
+ /* OK, expand initial record */
+ ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
+ if (ecode != TDB_SUCCESS) {
goto err;
}
- if (remove_from_list(tdb, b_off, off, r) == -1)
+ if (frec_len(&rec) != data_len) {
+ ecode = tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
+ "coalesce: expected data len %zu not %zu",
+ (size_t)data_len, (size_t)frec_len(&rec));
goto err;
+ }
- r = tdb_access_write(tdb, off, sizeof(*r), true);
- if (!r)
+ ecode = remove_from_list(tdb, b_off, off, &rec);
+ check_list(tdb, b_off);
+ if (ecode != TDB_SUCCESS) {
goto err;
+ }
- /* We have to drop this to avoid deadlocks, so make sure record
- * doesn't get coalesced by someone else! */
- r->magic_and_meta = TDB_COALESCING_MAGIC | zone_bits;
- r->data_len = end - off - sizeof(struct tdb_used_record);
- if (tdb_access_commit(tdb, r) != 0)
- goto err;
+	/* Try the lock-order-violating add first (non-blocking).  We don't
+	 * allow coalesce recursion! */
+ ecode = add_free_record(tdb, off, end - off, TDB_LOCK_NOWAIT, false);
+ if (ecode != TDB_SUCCESS) {
+ /* Need to drop lock. Can't rely on anything stable. */
+ tdb->stats.alloc_coalesce_lockfail++;
+ *protect = TDB_ERR_TO_OFF(TDB_ERR_CORRUPT);
+
+ /* We have to drop this to avoid deadlocks, so make sure record
+ * doesn't get coalesced by someone else! */
+ rec.ftable_and_len = (TDB_FTABLE_NONE
+ << (64 - TDB_OFF_UPPER_STEAL))
+ | (end - off - sizeof(struct tdb_used_record));
+ ecode = tdb_write_off(tdb,
+ off + offsetof(struct tdb_free_record,
+ ftable_and_len),
+ rec.ftable_and_len);
+ if (ecode != TDB_SUCCESS) {
+ goto err;
+ }
- tdb_unlock_free_bucket(tdb, b_off);
+ tdb_unlock_free_bucket(tdb, b_off);
+
+ ecode = add_free_record(tdb, off, end - off, TDB_LOCK_WAIT,
+ false);
+ if (ecode != TDB_SUCCESS) {
+ return TDB_ERR_TO_OFF(ecode);
+ }
+ } else if (TDB_OFF_IS_ERR(*protect)) {
+ /* For simplicity, we always drop lock if they can't continue */
+ tdb_unlock_free_bucket(tdb, b_off);
+ }
+ tdb->stats.alloc_coalesce_succeeded++;
- if (add_free_record(tdb, zone_bits, off, end - off) == -1)
- return -1;
- return 1;
+ /* Return usable length. */
+ return end - off - sizeof(struct tdb_used_record);
err:
/* To unify error paths, we *always* unlock bucket on error. */
tdb_unlock_free_bucket(tdb, b_off);
- return -1;
+ return TDB_ERR_TO_OFF(ecode);
+}
+
+/* List is locked: we unlock it. */
+static enum TDB_ERROR coalesce_list(struct tdb_context *tdb,
+ tdb_off_t ftable_off,
+ tdb_off_t b_off,
+ unsigned int limit)
+{
+ enum TDB_ERROR ecode;
+ tdb_off_t off;
+
+ off = tdb_read_off(tdb, b_off);
+ if (TDB_OFF_IS_ERR(off)) {
+ ecode = TDB_OFF_TO_ERR(off);
+ goto unlock_err;
+ }
+ /* A little bit of paranoia: counter should be 0. */
+ off &= TDB_OFF_MASK;
+
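+	/* Coalesce entries from the head of the list, within our limit. */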
+ while (off && limit--) {
+ struct tdb_free_record rec;
+ tdb_len_t coal;
+ tdb_off_t next;
+
+ ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ next = rec.next;
+ coal = coalesce(tdb, off, b_off, frec_len(&rec), &next);
+ if (TDB_OFF_IS_ERR(coal)) {
+ /* This has already unlocked on error. */
+ return TDB_OFF_TO_ERR(coal);
+ }
+ if (TDB_OFF_IS_ERR(next)) {
+ /* Coalescing had to unlock, so stop. */
+ return TDB_SUCCESS;
+ }
+ /* Keep going if we're doing well... */
+ limit += size_to_bucket(coal / 16 + TDB_MIN_DATA_LEN);
+ off = next;
+ }
+
+ /* Now, move those elements to the tail of the list so we get something
+ * else next time. */
+ if (off) {
+ struct tdb_free_record oldhrec, newhrec, oldtrec, newtrec;
+ tdb_off_t oldhoff, oldtoff, newtoff;
+
+ /* The record we were up to is the new head. */
+ ecode = tdb_read_convert(tdb, off, &newhrec, sizeof(newhrec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ /* Get the new tail. */
+ newtoff = frec_prev(&newhrec);
+ ecode = tdb_read_convert(tdb, newtoff, &newtrec,
+ sizeof(newtrec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ /* Get the old head. */
+ oldhoff = tdb_read_off(tdb, b_off);
+ if (TDB_OFF_IS_ERR(oldhoff)) {
+ ecode = TDB_OFF_TO_ERR(oldhoff);
+ goto unlock_err;
+ }
+
+ /* This could happen if they all coalesced away. */
+ if (oldhoff == off)
+ goto out;
+
+ ecode = tdb_read_convert(tdb, oldhoff, &oldhrec,
+ sizeof(oldhrec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ /* Get the old tail. */
+ oldtoff = frec_prev(&oldhrec);
+ ecode = tdb_read_convert(tdb, oldtoff, &oldtrec,
+ sizeof(oldtrec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ /* Old tail's next points to old head. */
+ oldtrec.next = oldhoff;
+
+ /* Old head's prev points to old tail. */
+ oldhrec.magic_and_prev
+ = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL))
+ | oldtoff;
+
+ /* New tail's next is 0. */
+ newtrec.next = 0;
+
+ /* Write out the modified versions. */
+ ecode = tdb_write_convert(tdb, oldtoff, &oldtrec,
+ sizeof(oldtrec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ ecode = tdb_write_convert(tdb, oldhoff, &oldhrec,
+ sizeof(oldhrec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ ecode = tdb_write_convert(tdb, newtoff, &newtrec,
+ sizeof(newtrec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ /* And finally link in new head. */
+ ecode = tdb_write_off(tdb, b_off, off);
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+ }
+out:
+ tdb_unlock_free_bucket(tdb, b_off);
+ return TDB_SUCCESS;
+
+unlock_err:
+ tdb_unlock_free_bucket(tdb, b_off);
+ return ecode;
+}
+
+/* List must not be locked if coalesce is set. */
+enum TDB_ERROR add_free_record(struct tdb_context *tdb,
+ tdb_off_t off, tdb_len_t len_with_header,
+ enum tdb_lock_flags waitflag,
+ bool coalesce)
+{
+ tdb_off_t b_off;
+ tdb_len_t len;
+ enum TDB_ERROR ecode;
+
+ assert(len_with_header >= sizeof(struct tdb_free_record));
+
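+	/* The length we record excludes the (used-record-sized) header. */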
+ len = len_with_header - sizeof(struct tdb_used_record);
+
+ b_off = bucket_off(tdb->tdb2.ftable_off, size_to_bucket(len));
+ ecode = tdb_lock_free_bucket(tdb, b_off, waitflag);
+ if (ecode != TDB_SUCCESS) {
+ return ecode;
+ }
+
+ ecode = enqueue_in_free(tdb, b_off, off, len, &coalesce);
+ check_list(tdb, b_off);
+
+ /* Coalescing unlocks free list. */
+ if (!ecode && coalesce)
+ ecode = coalesce_list(tdb, tdb->tdb2.ftable_off, b_off, 2);
+ else
+ tdb_unlock_free_bucket(tdb, b_off);
+ return ecode;
+}
+
+static size_t adjust_size(size_t keylen, size_t datalen)
+{
+ size_t size = keylen + datalen;
+
+ if (size < TDB_MIN_DATA_LEN)
+ size = TDB_MIN_DATA_LEN;
+
+ /* Round to next uint64_t boundary. */
+ return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
+}
+
+/* If we have enough left over to be useful, split that off. */
+static size_t record_leftover(size_t keylen, size_t datalen,
+ bool want_extra, size_t total_len)
+{
+ ssize_t leftover;
+
+ if (want_extra)
+ datalen += datalen / 2;
+ leftover = total_len - adjust_size(keylen, datalen);
+
+ if (leftover < (ssize_t)sizeof(struct tdb_free_record))
+ return 0;
+
+ return leftover;
}
/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
- tdb_off_t zone_off,
- unsigned zone_bits,
+ tdb_off_t ftable_off,
tdb_off_t bucket,
size_t keylen, size_t datalen,
bool want_extra,
+ unsigned magic,
unsigned hashlow)
{
tdb_off_t off, b_off,best_off;
- struct tdb_free_record pad, best = { 0 }, *r;
+ struct tdb_free_record best = { 0 };
double multiplier;
size_t size = adjust_size(keylen, datalen);
+ enum TDB_ERROR ecode;
-again:
- b_off = bucket_off(zone_off, bucket);
+ tdb->stats.allocs++;
+ b_off = bucket_off(ftable_off, bucket);
- /* FIXME: Try non-blocking wait first, to measure contention.
- * If we're contented, try switching zones, and don't enlarge zone
- * next time (we want more zones). */
+ /* FIXME: Try non-blocking wait first, to measure contention. */
/* Lock this bucket. */
- if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == -1) {
- return TDB_OFF_ERR;
+ ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
+ if (ecode != TDB_SUCCESS) {
+ return TDB_ERR_TO_OFF(ecode);
}
- best.data_len = -1ULL;
+ best.ftable_and_len = -1ULL;
best_off = 0;
/* Get slack if we're after extra. */
/* Walk the list to see if any are large enough, getting less fussy
* as we go. */
off = tdb_read_off(tdb, b_off);
- if (unlikely(off == TDB_OFF_ERR))
+ if (TDB_OFF_IS_ERR(off)) {
+ ecode = TDB_OFF_TO_ERR(off);
goto unlock_err;
+ }
+ off &= TDB_OFF_MASK;
while (off) {
- /* FIXME: Does tdb_get win anything here? */
- r = tdb_get(tdb, off, &pad, sizeof(*r));
- if (!r)
+ const struct tdb_free_record *r;
+ tdb_len_t len;
+ tdb_off_t next;
+
+ r = tdb_access_read(tdb, off, sizeof(*r), true);
+ if (TDB_PTR_IS_ERR(r)) {
+ ecode = TDB_PTR_ERR(r);
goto unlock_err;
+ }
if (frec_magic(r) != TDB_FREE_MAGIC) {
- tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
- "lock_and_alloc: %llu non-free 0x%llx\n",
- (long long)off, (long long)r->magic_and_meta);
+ ecode = tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
+ "lock_and_alloc:"
+ " %llu non-free 0x%llx",
+ (long long)off,
+ (long long)r->magic_and_prev);
+ tdb_access_release(tdb, r);
goto unlock_err;
}
- if (r->data_len >= size && r->data_len < best.data_len) {
+ if (frec_len(r) >= size && frec_len(r) < frec_len(&best)) {
best_off = off;
best = *r;
}
- if (best.data_len < size * multiplier && best_off)
+ if (frec_len(&best) <= size * multiplier && best_off) {
+ tdb_access_release(tdb, r);
break;
+ }
multiplier *= 1.01;
- /* Since we're going slow anyway, try coalescing here. */
- switch (coalesce(tdb, zone_off, zone_bits, off, b_off,
- r->data_len)) {
- case -1:
- /* This has already unlocked on error. */
- return -1;
- case 1:
- /* This has unlocked list, restart. */
- goto again;
- }
- off = r->next;
+ next = r->next;
+ len = frec_len(r);
+ tdb_access_release(tdb, r);
+ off = next;
}
/* If we found anything at all, use it. */
size_t leftover;
/* We're happy with this size: take it. */
- if (remove_from_list(tdb, b_off, best_off, &best) != 0)
+ ecode = remove_from_list(tdb, b_off, best_off, &best);
+ check_list(tdb, b_off);
+ if (ecode != TDB_SUCCESS) {
goto unlock_err;
+ }
leftover = record_leftover(keylen, datalen, want_extra,
- best.data_len);
+ frec_len(&best));
- assert(keylen + datalen + leftover <= best.data_len);
+ assert(keylen + datalen + leftover <= frec_len(&best));
/* We need to mark non-free before we drop lock, otherwise
* coalesce() could try to merge it! */
- if (set_header(tdb, &rec, keylen, datalen,
- best.data_len - leftover,
- hashlow, zone_bits) != 0)
+ ecode = set_header(tdb, &rec, magic, keylen, datalen,
+ frec_len(&best) - leftover, hashlow);
+ if (ecode != TDB_SUCCESS) {
goto unlock_err;
+ }
- if (tdb_write_convert(tdb, best_off, &rec, sizeof(rec)) != 0)
+ ecode = tdb_write_convert(tdb, best_off, &rec, sizeof(rec));
+ if (ecode != TDB_SUCCESS) {
goto unlock_err;
+ }
- tdb_unlock_free_bucket(tdb, b_off);
+ /* For futureproofing, we put a 0 in any unused space. */
+ if (rec_extra_padding(&rec)) {
+ ecode = tdb->tdb2.io->twrite(tdb, best_off + sizeof(rec)
+ + keylen + datalen, "", 1);
+ if (ecode != TDB_SUCCESS) {
+ goto unlock_err;
+ }
+ }
+ /* Bucket of leftover will be <= current bucket, so nested
+ * locking is allowed. */
if (leftover) {
- if (add_free_record(tdb, zone_bits,
- best_off + sizeof(rec)
- + best.data_len - leftover,
- leftover))
- return TDB_OFF_ERR;
+ tdb->stats.alloc_leftover++;
+ ecode = add_free_record(tdb,
+ best_off + sizeof(rec)
+ + frec_len(&best) - leftover,
+ leftover, TDB_LOCK_WAIT, false);
+ if (ecode != TDB_SUCCESS) {
+ best_off = TDB_ERR_TO_OFF(ecode);
+ }
}
+ tdb_unlock_free_bucket(tdb, b_off);
+
return best_off;
}
unlock_err:
tdb_unlock_free_bucket(tdb, b_off);
- return TDB_OFF_ERR;
-}
-
-static bool next_zone(struct tdb_context *tdb)
-{
- tdb_off_t next = tdb->zone_off + (1ULL << tdb->zhdr.zone_bits);
-
- /* We must have a header. */
- if (tdb->methods->oob(tdb, next + sizeof(tdb->zhdr), true))
- return false;
-
- tdb->zone_off = next;
- return tdb_read_convert(tdb, next, &tdb->zhdr, sizeof(tdb->zhdr)) == 0;
+ return TDB_ERR_TO_OFF(ecode);
}
-/* Offset returned is within current zone (which it may alter). */
+/* Get a free block from current free list, or 0 if none, -ve on error. */
static tdb_off_t get_free(struct tdb_context *tdb,
size_t keylen, size_t datalen, bool want_extra,
- unsigned hashlow)
+ unsigned magic, unsigned hashlow)
{
- tdb_off_t start_zone = tdb->zone_off, off;
+ tdb_off_t off, ftable_off;
+ tdb_off_t start_b, b, ftable;
bool wrapped = false;
- size_t size = adjust_size(keylen, datalen);
/* If they are growing, add 50% to get to higher bucket. */
if (want_extra)
- size += datalen / 2;
-
- /* FIXME: If we don't get a hit in the first bucket we want,
- * try changing zones for next time. That should help wear
- * zones evenly, so we don't need to search all of them before
- * expanding. */
- while (!wrapped || tdb->zone_off != start_zone) {
- tdb_off_t b;
-
- /* Shortcut for really huge allocations... */
- if ((size >> tdb->zhdr.zone_bits) != 0)
- goto next;
+ start_b = size_to_bucket(adjust_size(keylen,
+ datalen + datalen / 2));
+ else
+ start_b = size_to_bucket(adjust_size(keylen, datalen));
+ ftable_off = tdb->tdb2.ftable_off;
+ ftable = tdb->tdb2.ftable;
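+	/* Search from our current free table, walking the chain and wrapping
+	 * once, until we arrive back where we started. */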
+ while (!wrapped || ftable_off != tdb->tdb2.ftable_off) {
/* Start at exact size bucket, and search up... */
- b = size_to_bucket(tdb->zhdr.zone_bits, size);
- for (b = find_free_head(tdb, b);
- b <= BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits);
- b = find_free_head(tdb, b + 1)) {
+ for (b = find_free_head(tdb, ftable_off, start_b);
+ b < TDB_FREE_BUCKETS;
+ b = find_free_head(tdb, ftable_off, b + 1)) {
/* Try getting one from list. */
- off = lock_and_alloc(tdb, tdb->zone_off,
- tdb->zhdr.zone_bits,
+ off = lock_and_alloc(tdb, ftable_off,
b, keylen, datalen, want_extra,
- hashlow);
- if (off == TDB_OFF_ERR)
- return TDB_OFF_ERR;
- if (off != 0)
+ magic, hashlow);
+ if (TDB_OFF_IS_ERR(off))
return off;
+ if (off != 0) {
+ if (b == start_b)
+ tdb->stats.alloc_bucket_exact++;
+ if (b == TDB_FREE_BUCKETS - 1)
+ tdb->stats.alloc_bucket_max++;
+ /* Worked? Stay using this list. */
+ tdb->tdb2.ftable_off = ftable_off;
+ tdb->tdb2.ftable = ftable;
+ return off;
+ }
/* Didn't work. Try next bucket. */
}
- next:
- /* Didn't work, try next zone, if it exists. */
- if (!next_zone(tdb)) {
+ if (TDB_OFF_IS_ERR(b)) {
+ return b;
+ }
+
+ /* Hmm, try next table. */
+ ftable_off = next_ftable(tdb, ftable_off);
+ if (TDB_OFF_IS_ERR(ftable_off)) {
+ return ftable_off;
+ }
+ ftable++;
+
+ if (ftable_off == 0) {
wrapped = true;
- tdb->zone_off = sizeof(struct tdb_header);
- if (tdb_read_convert(tdb, tdb->zone_off,
- &tdb->zhdr, sizeof(tdb->zhdr))) {
- return TDB_OFF_ERR;
+ ftable_off = first_ftable(tdb);
+ if (TDB_OFF_IS_ERR(ftable_off)) {
+ return ftable_off;
}
+ ftable = 0;
}
}
+
return 0;
}
-int set_header(struct tdb_context *tdb,
- struct tdb_used_record *rec,
- uint64_t keylen, uint64_t datalen,
- uint64_t actuallen, unsigned hashlow,
- unsigned int zone_bits)
+enum TDB_ERROR set_header(struct tdb_context *tdb,
+ struct tdb_used_record *rec,
+ unsigned magic, uint64_t keylen, uint64_t datalen,
+ uint64_t actuallen, unsigned hashlow)
{
uint64_t keybits = (fls64(keylen) + 1) / 2;
/* Use bottom bits of hash, so it's independent of hash table size. */
- rec->magic_and_meta
- = zone_bits
- | ((hashlow & ((1 << 5)-1)) << 6)
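+	/* Layout: hash in bits 0-10, extra padding in 11-42, key-length bits
+	 * in 43-47, magic in 48-63. */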
+ rec->magic_and_meta = (hashlow & ((1 << 11)-1))
| ((actuallen - (keylen + datalen)) << 11)
| (keybits << 43)
- | (TDB_MAGIC << 48);
+ | ((uint64_t)magic << 48);
rec->key_and_data_len = (keylen | (datalen << (keybits*2)));
/* Encoding can fail on big values. */
if (rec_key_length(rec) != keylen
|| rec_data_length(rec) != datalen
|| rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
- tdb->ecode = TDB_ERR_IO;
- tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
- "Could not encode k=%llu,d=%llu,a=%llu\n",
- (long long)keylen, (long long)datalen,
- (long long)actuallen);
- return -1;
+ return tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
+ "Could not encode k=%llu,d=%llu,a=%llu",
+ (long long)keylen, (long long)datalen,
+ (long long)actuallen);
}
- return 0;
+ return TDB_SUCCESS;
}
-static bool zones_contended(struct tdb_context *tdb)
+/* Expand the database. */
+static enum TDB_ERROR tdb_expand(struct tdb_context *tdb, tdb_len_t size)
{
- return false;
-}
+ uint64_t old_size, rec_size, map_size;
+ tdb_len_t wanted;
+ enum TDB_ERROR ecode;
+
+ /* Need to hold a hash lock to expand DB: transactions rely on it. */
+ if (!(tdb->flags & TDB_NOLOCK)
+ && !tdb->file->allrecord_lock.count && !tdb_has_hash_locks(tdb)) {
+ return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
+ "tdb_expand: must hold lock during expand");
+ }
-/* Assume we want buckets up to the comfort factor. */
-static tdb_len_t overhead(unsigned int zone_bits)
-{
- return sizeof(struct free_zone_header)
- + (BUCKETS_FOR_ZONE(zone_bits) + 1) * sizeof(tdb_off_t);
-}
+ /* Only one person can expand file at a time. */
+ ecode = tdb_lock_expand(tdb, F_WRLCK);
+ if (ecode != TDB_SUCCESS) {
+ return ecode;
+ }
-/* Expand the database (by adding a zone). */
-static int tdb_expand(struct tdb_context *tdb, tdb_len_t size)
-{
- uint64_t old_size;
- tdb_off_t off;
- unsigned int num_buckets, zone_bits;
- tdb_len_t wanted, expand;
- struct free_zone_header zhdr;
+ /* Someone else may have expanded the file, so retry. */
+ old_size = tdb->file->map_size;
+ tdb->tdb2.io->oob(tdb, tdb->file->map_size + 1, true);
+ if (tdb->file->map_size != old_size) {
+ tdb_unlock_expand(tdb, F_WRLCK);
+ return TDB_SUCCESS;
+ }
- /* We need room for the record header too. */
- wanted = sizeof(struct tdb_used_record) + size;
+	/* Limit the size, to avoid using up huge amounts of memory on
+	 * in-memory tdbs if an oddball huge record creeps in. */
+ if (size > 100 * 1024) {
+ rec_size = size * 2;
+ } else {
+ rec_size = size * 100;
+ }
- /* Only one person can expand file at a time. */
- if (tdb_lock_expand(tdb, F_WRLCK) != 0)
- return -1;
+	/* Always make room for at least rec_size more records, and grow by
+	 * at least 25% if the DB is smaller than 100MiB, otherwise by 10%
+	 * only. */
+ if (old_size > 100 * 1024 * 1024) {
+ map_size = old_size / 10;
+ } else {
+ map_size = old_size / 4;
+ }
- /* Someone else may have expanded the file, so retry. */
- old_size = tdb->map_size;
- tdb->methods->oob(tdb, tdb->map_size + 1, true);
- if (tdb->map_size != old_size)
- goto success;
-
- /* Treat last zone as minimum reasonable zone size. */
- off = last_zone(tdb, &zhdr);
- if (off == TDB_OFF_ERR)
- goto fail;
-
- /* Zone isn't fully expanded? */
- if (tdb->map_size < off + (1ULL << zhdr.zone_bits)) {
- expand = off + (1ULL << zhdr.zone_bits) - tdb->map_size;
- /* Expand more than we want. */
- if (expand > (wanted << TDB_COMFORT_FACTOR_BITS))
- expand = (wanted << TDB_COMFORT_FACTOR_BITS);
- if (tdb->methods->expand_file(tdb, expand) == -1)
- goto fail;
- /* We need to drop this lock before adding free record. */
+ if (map_size > rec_size) {
+ wanted = map_size;
+ } else {
+ wanted = rec_size;
+ }
+
+ /* We need room for the record header too. */
+ wanted = adjust_size(0, sizeof(struct tdb_used_record) + wanted);
+
+ ecode = tdb->tdb2.io->expand_file(tdb, wanted);
+ if (ecode != TDB_SUCCESS) {
tdb_unlock_expand(tdb, F_WRLCK);
+ return ecode;
+ }
- /* Allocate from here. */
- tdb->zone_off = off;
- tdb->zhdr = zhdr;
-
- /* FIXME: If this isn't sufficient, we search again... */
- return add_free_record(tdb, zhdr.zone_bits,
- tdb->map_size - expand, expand);
- }
-
- /* We are never allowed to cross a power-of-two boundary, and our
- * minimum zone size is 1 << INITIAL_ZONE_BITS.
- *
- * If our filesize is 128k, we can add a 64k or a 128k zone. If it's
- * 192k, we can only add a 64k zone.
- *
- * In other words, our max zone size is (1 << (ffs(filesize) - 1)) */
- zone_bits = ffs64(old_size - sizeof(struct tdb_header)) - 1;
- assert(zone_bits >= INITIAL_ZONE_BITS);
-
- /* Big zones generally good, but more zones wanted if contended. */
- if (zones_contended(tdb)) {
- /* If it suffices, make zone same size as last one. */
- if (zhdr.zone_bits < zone_bits
- && (1ULL << zhdr.zone_bits) >= overhead(zone_bits)+wanted)
- zone_bits = zhdr.zone_bits;
- }
-
- zhdr.zone_bits = zone_bits;
- num_buckets = BUCKETS_FOR_ZONE(zone_bits);
-
- /* Expand the file by more than we need right now. */
- expand = 1ULL << zone_bits;
- if (expand > overhead(zone_bits) + (wanted << TDB_COMFORT_FACTOR_BITS))
- expand = overhead(zone_bits)
- + (wanted << TDB_COMFORT_FACTOR_BITS);
-
- if (tdb->methods->expand_file(tdb, expand) == -1)
- goto fail;
-
- /* Write new zone header (at old end). */
- off = old_size;
- if (tdb_write_convert(tdb, off, &zhdr, sizeof(zhdr)) == -1)
- goto fail;
-
- /* Now write empty buckets. */
- off += sizeof(zhdr);
- if (zero_out(tdb, off, (num_buckets+1) * sizeof(tdb_off_t)) == -1)
- goto fail;
- off += (num_buckets+1) * sizeof(tdb_off_t);
-
- /* Now add the rest as our free record. */
- if (add_free_record(tdb, zone_bits, off, expand - overhead(zone_bits))
- == -1)
- goto fail;
-
- /* Try allocating from this zone now. */
- tdb->zone_off = old_size;
- tdb->zhdr = zhdr;
-
-success:
+ /* We need to drop this lock before adding free record. */
tdb_unlock_expand(tdb, F_WRLCK);
- return 0;
-fail:
- tdb_unlock_expand(tdb, F_WRLCK);
- return -1;
+ tdb->stats.expands++;
+ return add_free_record(tdb, old_size, wanted, TDB_LOCK_WAIT, true);
}
/* This won't fail: it will expand the database if it has to. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
- uint64_t hash, bool growing)
+ uint64_t hash, unsigned magic, bool growing)
{
tdb_off_t off;
/* We can't hold pointers during this: we could unmap! */
- assert(!tdb->direct_access);
+ assert(!tdb->tdb2.direct_access);
for (;;) {
- off = get_free(tdb, keylen, datalen, growing, hash);
+ enum TDB_ERROR ecode;
+ off = get_free(tdb, keylen, datalen, growing, magic, hash);
if (likely(off != 0))
break;
- if (tdb_expand(tdb, adjust_size(keylen, datalen)))
- return TDB_OFF_ERR;
+ ecode = tdb_expand(tdb, adjust_size(keylen, datalen));
+ if (ecode != TDB_SUCCESS) {
+ return TDB_ERR_TO_OFF(ecode);
+ }
}
return off;