tdb_off_t off, prev = 0, first;
struct tdb_free_record r;
- first = off = tdb_read_off(tdb, b_off);
+ first = off = (tdb_read_off(tdb, b_off) & TDB_OFF_MASK);
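+ /* (The head word carries an enqueue counter in its upper bits,
+ * hence the TDB_OFF_MASK.) */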
while (off != 0) {
tdb_read_convert(tdb, off, &r, sizeof(r));
if (frec_magic(&r) != TDB_FREE_MAGIC)
/* If prev->next == 0, we were head: update bucket to point to next. */
if (prev_next == 0) {
-#ifdef CCAN_TDB2_DEBUG
- if (tdb_read_off(tdb, b_off) != r_off) {
+ /* We must preserve the upper bits: they hold the enqueue counter. */

+ head = tdb_read_off(tdb, b_off);
+ if (TDB_OFF_IS_ERR(head))
+ return head;
+
+ if ((head & TDB_OFF_MASK) != r_off) {
return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
"remove_from_list:"
" %llu head %llu on list %llu",
(long long)r_off,
- (long long)tdb_read_off(tdb, b_off),
+ (long long)head,
(long long)b_off);
}
-#endif
- ecode = tdb_write_off(tdb, b_off, r->next);
+ head = ((head & ~TDB_OFF_MASK) | r->next);
+ ecode = tdb_write_off(tdb, b_off, head);
if (ecode != TDB_SUCCESS)
return ecode;
} else {
head = tdb_read_off(tdb, b_off);
if (TDB_OFF_IS_ERR(head))
return head;
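+ /* Strip the enqueue counter from the upper bits to get the
+ * real head offset. */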
+ head &= TDB_OFF_MASK;
off = head + offsetof(struct tdb_free_record, magic_and_prev);
} else {
/* off = &r->next->prev */
return tdb_write_off(tdb, off, r->magic_and_prev);
}
-/* Enqueue in this free bucket. */
+/* Enqueue in this free bucket: sets *coalesce once we've added 128
+ * entries to it. */
static enum TDB_ERROR enqueue_in_free(struct tdb_context *tdb,
tdb_off_t b_off,
tdb_off_t off,
- tdb_len_t len)
+ tdb_len_t len,
+ bool *coalesce)
{
struct tdb_free_record new;
enum TDB_ERROR ecode;
- tdb_off_t prev;
+ tdb_off_t prev, head;
uint64_t magic = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL));
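+ /* The bucket head packs the list-head offset in the low bits
+ * (TDB_OFF_MASK) and an enqueue counter in the stolen upper bits,
+ * so keep the whole word. */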
+ head = tdb_read_off(tdb, b_off);
+ if (TDB_OFF_IS_ERR(head))
+ return head;
+
/* We only need to set ftable_and_len; rest is set in enqueue_in_free */
new.ftable_and_len = ((uint64_t)tdb->ftable << (64 - TDB_OFF_UPPER_STEAL))
| len;
/* new->next = head. */
- new.next = tdb_read_off(tdb, b_off);
- if (TDB_OFF_IS_ERR(new.next)) {
- return new.next;
- }
+ new.next = (head & TDB_OFF_MASK);
/* First element? Prev points to ourselves. */
if (!new.next) {
}
#endif
}
- /* head = new */
- ecode = tdb_write_off(tdb, b_off, off);
- if (ecode != TDB_SUCCESS) {
- return ecode;
- }
-
- return tdb_write_convert(tdb, off, &new, sizeof(new));
-}
-
-/* List need not be locked. */
-enum TDB_ERROR add_free_record(struct tdb_context *tdb,
- tdb_off_t off, tdb_len_t len_with_header)
-{
- tdb_off_t b_off;
- tdb_len_t len;
- enum TDB_ERROR ecode;
-
- assert(len_with_header >= sizeof(struct tdb_free_record));
- len = len_with_header - sizeof(struct tdb_used_record);
+ /* Update enqueue count, but don't set high bit: see TDB_OFF_IS_ERR */
+ if (*coalesce)
+ head += (1ULL << (64 - TDB_OFF_UPPER_STEAL));
+ head &= ~(TDB_OFF_MASK | (1ULL << 63));
+ head |= off;
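+ /* head now has bit 63 clear, the (possibly bumped) enqueue counter
+ * in the remaining upper bits, and our offset in the low bits. */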
- b_off = bucket_off(tdb->ftable_off, size_to_bucket(len));
- ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
+ ecode = tdb_write_off(tdb, b_off, head);
if (ecode != TDB_SUCCESS) {
return ecode;
}
- ecode = enqueue_in_free(tdb, b_off, off, len);
- check_list(tdb, b_off);
- tdb_unlock_free_bucket(tdb, b_off);
- return ecode;
-}
-
-static size_t adjust_size(size_t keylen, size_t datalen)
-{
- size_t size = keylen + datalen;
-
- if (size < TDB_MIN_DATA_LEN)
- size = TDB_MIN_DATA_LEN;
-
- /* Round to next uint64_t boundary. */
- return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
-}
-
-/* If we have enough left over to be useful, split that off. */
-static size_t record_leftover(size_t keylen, size_t datalen,
- bool want_extra, size_t total_len)
-{
- ssize_t leftover;
+ /* It's time to coalesce if counter wrapped. */
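+ /* (Bit 63 stays reserved for TDB_OFF_IS_ERR, so the counter has
+ * TDB_OFF_UPPER_STEAL - 1 usable bits; the "128" above implies 8
+ * stolen bits.) */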
+ if (*coalesce)
+ *coalesce = ((head & ~TDB_OFF_MASK) == 0);
- if (want_extra)
- datalen += datalen / 2;
- leftover = total_len - adjust_size(keylen, datalen);
-
- if (leftover < (ssize_t)sizeof(struct tdb_free_record))
- return 0;
-
- return leftover;
+ return tdb_write_convert(tdb, off, &new, sizeof(new));
}
static tdb_off_t ftable_offset(struct tdb_context *tdb, unsigned int ftable)
return off;
}
-/* Note: we unlock the current bucket if we coalesce or fail. */
-static tdb_bool_err coalesce(struct tdb_context *tdb,
- tdb_off_t off, tdb_off_t b_off,
- tdb_len_t data_len)
+/* Note: we unlock the current bucket if we fail (-ve), or if we coalesce
+ * (+ve) and have to blatt the *protect record (which is then set to an
+ * error). */
+static tdb_len_t coalesce(struct tdb_context *tdb,
+ tdb_off_t off, tdb_off_t b_off,
+ tdb_len_t data_len,
+ tdb_off_t *protect)
{
tdb_off_t end;
struct tdb_free_record rec;
break;
}
+ /* Did we just mess up a record you were hoping to use? */
+ if (end == *protect) {
+ tdb->stats.alloc_coalesce_iterate_clash++;
+ *protect = TDB_ERR_NOEXIST;
+ }
+
ecode = remove_from_list(tdb, nb_off, end, &rec);
check_list(tdb, nb_off);
if (ecode != TDB_SUCCESS) {
/* Didn't find any adjacent free? */
if (end == off + sizeof(struct tdb_used_record) + data_len)
- return false;
+ return 0;
+
+ /* Before we expand, check whether this is the record you wanted protected. */
+ if (off == *protect) {
+ *protect = TDB_ERR_EXISTS;
+ tdb->stats.alloc_coalesce_iterate_clash++;
+ }
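+ /* (TDB_ERR_EXISTS here, vs TDB_ERR_NOEXIST above: the protected
+ * record isn't being swallowed by a neighbour, it's the one being
+ * expanded.) */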
/* OK, expand initial record */
ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
goto err;
}
- /* We have to drop this to avoid deadlocks, so make sure record
- * doesn't get coalesced by someone else! */
- rec.ftable_and_len = (TDB_FTABLE_NONE << (64 - TDB_OFF_UPPER_STEAL))
- | (end - off - sizeof(struct tdb_used_record));
- ecode = tdb_write_off(tdb, off + offsetof(struct tdb_free_record,
- ftable_and_len),
- rec.ftable_and_len);
+ /* Try a lock-order violation first. We don't allow coalesce recursion! */
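+ /* (We already hold b_off; the merged record can belong to a
+ * different bucket, so we grab that lock with TDB_LOCK_NOWAIT to
+ * avoid deadlock.) */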
+ ecode = add_free_record(tdb, off, end - off, TDB_LOCK_NOWAIT, false);
if (ecode != TDB_SUCCESS) {
- goto err;
- }
+ /* Need to drop lock. Can't rely on anything stable. */
+ tdb->stats.alloc_coalesce_lockfail++;
+ *protect = TDB_ERR_CORRUPT;
+
+ /* We have to drop this to avoid deadlocks, so make sure record
+ * doesn't get coalesced by someone else! */
+ rec.ftable_and_len = (TDB_FTABLE_NONE
+ << (64 - TDB_OFF_UPPER_STEAL))
+ | (end - off - sizeof(struct tdb_used_record));
+ ecode = tdb_write_off(tdb,
+ off + offsetof(struct tdb_free_record,
+ ftable_and_len),
+ rec.ftable_and_len);
+ if (ecode != TDB_SUCCESS) {
+ goto err;
+ }
+
+ tdb_unlock_free_bucket(tdb, b_off);
+ ecode = add_free_record(tdb, off, end - off, TDB_LOCK_WAIT,
+ false);
+ if (ecode != TDB_SUCCESS) {
+ return ecode;
+ }
+ } else if (TDB_OFF_IS_ERR(*protect)) {
+ /* For simplicity, we always drop the lock if the caller can't continue. */
+ tdb_unlock_free_bucket(tdb, b_off);
+ }
tdb->stats.alloc_coalesce_succeeded++;
+
+ /* Return usable length. */
+ return end - off - sizeof(struct tdb_used_record);
+
+err:
+ /* To unify error paths, we *always* unlock bucket on error. */
tdb_unlock_free_bucket(tdb, b_off);
+ return ecode;
+}
- ecode = add_free_record(tdb, off, end - off);
+/* List is locked: we unlock it. */
+static enum TDB_ERROR coalesce_list(struct tdb_context *tdb,
+ tdb_off_t ftable_off,
+ tdb_off_t b_off,
+ unsigned int limit)
+{
+ enum TDB_ERROR ecode;
+ tdb_off_t off;
+
+ off = tdb_read_off(tdb, b_off);
+ if (TDB_OFF_IS_ERR(off)) {
+ ecode = off;
+ goto unlock_err;
+ }
+ /* A little bit of paranoia: counter should be 0. */
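+ /* (We're only called when the counter just wrapped to zero, so the
+ * mask should be a no-op.) */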
+ off &= TDB_OFF_MASK;
+
+ while (off && limit--) {
+ struct tdb_free_record rec;
+ tdb_len_t coal;
+ tdb_off_t next;
+
+ ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ next = rec.next;
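+ /* Pass &next as the protect pointer: if coalescing eats or
+ * grows the record we intended to visit next, it replaces
+ * next with an error and we stop below. */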
+ coal = coalesce(tdb, off, b_off, frec_len(&rec), &next);
+ if (TDB_OFF_IS_ERR(coal)) {
+ /* This has already unlocked on error. */
+ return coal;
+ }
+ if (TDB_OFF_IS_ERR(next)) {
+ /* Coalescing had to unlock, so stop. */
+ return TDB_SUCCESS;
+ }
+ /* Keep going if we're doing well... */
+ limit += size_to_bucket(coal / 16 + TDB_MIN_DATA_LEN);
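+ /* (The bigger the record we just made, the more extra
+ * iterations we earn.) */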
+ off = next;
+ }
+
+ /* Now, move those elements to the tail of the list so we get something
+ * else next time. */
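+ /* E.g. if the list was H -> ... -> P -> off -> ... -> T, with T the
+ * old tail, we relink to get off -> ... -> T -> H -> ... -> P. */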
+ if (off) {
+ struct tdb_free_record oldhrec, newhrec, oldtrec, newtrec;
+ tdb_off_t oldhoff, oldtoff, newtoff;
+
+ /* The record we were up to is the new head. */
+ ecode = tdb_read_convert(tdb, off, &newhrec, sizeof(newhrec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ /* Get the new tail. */
+ newtoff = frec_prev(&newhrec);
+ ecode = tdb_read_convert(tdb, newtoff, &newtrec,
+ sizeof(newtrec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ /* Get the old head. */
+ oldhoff = tdb_read_off(tdb, b_off);
+ if (TDB_OFF_IS_ERR(oldhoff)) {
+ ecode = oldhoff;
+ goto unlock_err;
+ }
+
+ /* This could happen if they all coalesced away. */
+ if (oldhoff == off)
+ goto out;
+
+ ecode = tdb_read_convert(tdb, oldhoff, &oldhrec,
+ sizeof(oldhrec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ /* Get the old tail. */
+ oldtoff = frec_prev(&oldhrec);
+ ecode = tdb_read_convert(tdb, oldtoff, &oldtrec,
+ sizeof(oldtrec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ /* Old tail's next points to old head. */
+ oldtrec.next = oldhoff;
+
+ /* Old head's prev points to old tail. */
+ oldhrec.magic_and_prev
+ = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL))
+ | oldtoff;
+
+ /* New tail's next is 0. */
+ newtrec.next = 0;
+
+ /* Write out the modified versions. */
+ ecode = tdb_write_convert(tdb, oldtoff, &oldtrec,
+ sizeof(oldtrec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ ecode = tdb_write_convert(tdb, oldhoff, &oldhrec,
+ sizeof(oldhrec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ ecode = tdb_write_convert(tdb, newtoff, &newtrec,
+ sizeof(newtrec));
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+
+ /* And finally link in new head. */
+ ecode = tdb_write_off(tdb, b_off, off);
+ if (ecode != TDB_SUCCESS)
+ goto unlock_err;
+ }
+out:
+ tdb_unlock_free_bucket(tdb, b_off);
+ return TDB_SUCCESS;
+
+unlock_err:
+ tdb_unlock_free_bucket(tdb, b_off);
+ return ecode;
+}
+
+/* List must not be locked if coalesce is set. */
+enum TDB_ERROR add_free_record(struct tdb_context *tdb,
+ tdb_off_t off, tdb_len_t len_with_header,
+ enum tdb_lock_flags waitflag,
+ bool coalesce)
+{
+ tdb_off_t b_off;
+ tdb_len_t len;
+ enum TDB_ERROR ecode;
+
+ assert(len_with_header >= sizeof(struct tdb_free_record));
+
+ len = len_with_header - sizeof(struct tdb_used_record);
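+ /* Lengths in free records exclude the struct tdb_used_record
+ * header, matching what frec_len() returns. */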
+
+ b_off = bucket_off(tdb->ftable_off, size_to_bucket(len));
+ ecode = tdb_lock_free_bucket(tdb, b_off, waitflag);
if (ecode != TDB_SUCCESS) {
return ecode;
}
- return true;
-err:
- /* To unify error paths, we *always* unlock bucket on error. */
- tdb_unlock_free_bucket(tdb, b_off);
+ ecode = enqueue_in_free(tdb, b_off, off, len, &coalesce);
+ check_list(tdb, b_off);
+
+ /* Coalescing unlocks free list. */
+ if (!ecode && coalesce)
+ ecode = coalesce_list(tdb, tdb->ftable_off, b_off, 2);
+ else
+ tdb_unlock_free_bucket(tdb, b_off);
return ecode;
}
+static size_t adjust_size(size_t keylen, size_t datalen)
+{
+ size_t size = keylen + datalen;
+
+ if (size < TDB_MIN_DATA_LEN)
+ size = TDB_MIN_DATA_LEN;
+
+ /* Round to next uint64_t boundary. */
+ return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
+}
+
+/* If we have enough left over to be useful, split that off. */
+static size_t record_leftover(size_t keylen, size_t datalen,
+ bool want_extra, size_t total_len)
+{
+ ssize_t leftover;
+
+ if (want_extra)
+ datalen += datalen / 2;
+ leftover = total_len - adjust_size(keylen, datalen);
+
+ if (leftover < (ssize_t)sizeof(struct tdb_free_record))
+ return 0;
+
+ return leftover;
+}
+
/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
tdb_off_t ftable_off,
enum TDB_ERROR ecode;
tdb->stats.allocs++;
-again:
b_off = bucket_off(ftable_off, bucket);
/* FIXME: Try non-blocking wait first, to measure contention. */
ecode = off;
goto unlock_err;
}
+ off &= TDB_OFF_MASK;
while (off) {
const struct tdb_free_record *r;
tdb_len_t len;
tdb_off_t next;
- int coal;
r = tdb_access_read(tdb, off, sizeof(*r), true);
if (TDB_PTR_IS_ERR(r)) {
next = r->next;
len = frec_len(r);
tdb_access_release(tdb, r);
-
- /* Since we're going slow anyway, try coalescing here. */
- coal = coalesce(tdb, off, b_off, len);
- if (coal == 1) {
- /* This has unlocked list, restart. */
- goto again;
- }
- if (coal < 0) {
- /* This has already unlocked on error. */
- return coal;
- }
off = next;
}
ecode = add_free_record(tdb,
best_off + sizeof(rec)
+ frec_len(&best) - leftover,
- leftover);
+ leftover, TDB_LOCK_WAIT, false);
if (ecode != TDB_SUCCESS) {
best_off = ecode;
}
tdb_unlock_expand(tdb, F_WRLCK);
tdb->stats.expands++;
- return add_free_record(tdb, old_size, wanted);
+ return add_free_record(tdb, old_size, wanted, TDB_LOCK_WAIT, true);
}
/* This won't fail: it will expand the database if it has to. */