X-Git-Url: http://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Ftdb2%2Ffree.c;h=e693fe828ae3bebf801c653184256ace07e9d23e;hp=eaaeb3cf2bca594d8294ac9a25b533d45d671eea;hb=c438ec17d7b2efe76e56e5fc5ab88bd4a02735e8;hpb=6b999f4511e8458bd79eddc2951ab91511d23fa8

diff --git a/ccan/tdb2/free.c b/ccan/tdb2/free.c
index eaaeb3cf..e693fe82 100644
--- a/ccan/tdb2/free.c
+++ b/ccan/tdb2/free.c
@@ -65,18 +65,18 @@ enum TDB_ERROR tdb_ftable_init(struct tdb_context *tdb)
 	unsigned int rnd, max = 0, count = 0;
 	tdb_off_t off;
 
-	tdb->ftable_off = off = first_ftable(tdb);
-	tdb->ftable = 0;
+	tdb->tdb2.ftable_off = off = first_ftable(tdb);
+	tdb->tdb2.ftable = 0;
 
 	while (off) {
 		if (TDB_OFF_IS_ERR(off)) {
-			return off;
+			return TDB_OFF_TO_ERR(off);
 		}
 
 		rnd = random();
 		if (rnd >= max) {
-			tdb->ftable_off = off;
-			tdb->ftable = count;
+			tdb->tdb2.ftable_off = off;
+			tdb->tdb2.ftable = count;
 			max = rnd;
 		}
 
@@ -109,7 +109,7 @@ static void check_list(struct tdb_context *tdb, tdb_off_t b_off)
 	tdb_off_t off, prev = 0, first;
 	struct tdb_free_record r;
 
-	first = off = tdb_read_off(tdb, b_off);
+	first = off = (tdb_read_off(tdb, b_off) & TDB_OFF_MASK);
 	while (off != 0) {
 		tdb_read_convert(tdb, off, &r, sizeof(r));
 		if (frec_magic(&r) != TDB_FREE_MAGIC)
@@ -146,21 +146,25 @@ static enum TDB_ERROR remove_from_list(struct tdb_context *tdb,
 	/* Get prev->next */
 	prev_next = tdb_read_off(tdb, off);
 	if (TDB_OFF_IS_ERR(prev_next))
-		return prev_next;
+		return TDB_OFF_TO_ERR(prev_next);
 
 	/* If prev->next == 0, we were head: update bucket to point to next. */
 	if (prev_next == 0) {
-#ifdef CCAN_TDB2_DEBUG
-		if (tdb_read_off(tdb, b_off) != r_off) {
+		/* We must preserve upper bits. */
+		head = tdb_read_off(tdb, b_off);
+		if (TDB_OFF_IS_ERR(head))
+			return TDB_OFF_TO_ERR(head);
+
+		if ((head & TDB_OFF_MASK) != r_off) {
 			return tdb_logerr(tdb, TDB_ERR_CORRUPT, TDB_LOG_ERROR,
 					  "remove_from_list:"
 					  " %llu head %llu on list %llu",
 					  (long long)r_off,
-					  (long long)tdb_read_off(tdb, b_off),
+					  (long long)head,
 					  (long long)b_off);
 		}
-#endif
-		ecode = tdb_write_off(tdb, b_off, r->next);
+		head = ((head & ~TDB_OFF_MASK) | r->next);
+		ecode = tdb_write_off(tdb, b_off, head);
 		if (ecode != TDB_SUCCESS)
 			return ecode;
 	} else {
@@ -174,7 +178,8 @@ static enum TDB_ERROR remove_from_list(struct tdb_context *tdb,
 	if (r->next == 0) {
 		head = tdb_read_off(tdb, b_off);
 		if (TDB_OFF_IS_ERR(head))
-			return head;
+			return TDB_OFF_TO_ERR(head);
+		head &= TDB_OFF_MASK;
 		off = head + offsetof(struct tdb_free_record, magic_and_prev);
 	} else {
 		/* off = &r->next->prev */
@@ -195,26 +200,29 @@
 	return tdb_write_off(tdb, off, r->magic_and_prev);
 }
 
-/* Enqueue in this free bucket. */
+/* Enqueue in this free bucket: sets coalesce if we've added 128
+ * entries to it. */
 static enum TDB_ERROR enqueue_in_free(struct tdb_context *tdb,
 				      tdb_off_t b_off,
 				      tdb_off_t off,
-				      tdb_len_t len)
+				      tdb_len_t len,
+				      bool *coalesce)
 {
 	struct tdb_free_record new;
 	enum TDB_ERROR ecode;
-	tdb_off_t prev;
+	tdb_off_t prev, head;
 	uint64_t magic = (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL));
 
+	head = tdb_read_off(tdb, b_off);
+	if (TDB_OFF_IS_ERR(head))
+		return TDB_OFF_TO_ERR(head);
+
 	/* We only need to set ftable_and_len; rest is set in enqueue_in_free */
-	new.ftable_and_len = ((uint64_t)tdb->ftable << (64 - TDB_OFF_UPPER_STEAL))
+	new.ftable_and_len = ((uint64_t)tdb->tdb2.ftable << (64 - TDB_OFF_UPPER_STEAL))
 		| len;
 
 	/* new->next = head. */
-	new.next = tdb_read_off(tdb, b_off);
-	if (TDB_OFF_IS_ERR(new.next)) {
-		return new.next;
-	}
+	new.next = (head & TDB_OFF_MASK);
 
 	/* First element?  Prev points to ourselves. */
 	if (!new.next) {
@@ -255,64 +263,23 @@ static enum TDB_ERROR enqueue_in_free(struct tdb_context *tdb,
 		}
 #endif
 	}
-	/* head = new */
-	ecode = tdb_write_off(tdb, b_off, off);
-	if (ecode != TDB_SUCCESS) {
-		return ecode;
-	}
-	return tdb_write_convert(tdb, off, &new, sizeof(new));
-}
+	/* Update enqueue count, but don't set high bit: see TDB_OFF_IS_ERR */
+	if (*coalesce)
+		head += (1ULL << (64 - TDB_OFF_UPPER_STEAL));
+	head &= ~(TDB_OFF_MASK | (1ULL << 63));
+	head |= off;
 
-/* List need not be locked. */
-enum TDB_ERROR add_free_record(struct tdb_context *tdb,
-			       tdb_off_t off, tdb_len_t len_with_header)
-{
-	tdb_off_t b_off;
-	tdb_len_t len;
-	enum TDB_ERROR ecode;
-
-	assert(len_with_header >= sizeof(struct tdb_free_record));
-
-	len = len_with_header - sizeof(struct tdb_used_record);
-
-	b_off = bucket_off(tdb->ftable_off, size_to_bucket(len));
-	ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
+	ecode = tdb_write_off(tdb, b_off, head);
 	if (ecode != TDB_SUCCESS) {
 		return ecode;
 	}
 
-	ecode = enqueue_in_free(tdb, b_off, off, len);
-	check_list(tdb, b_off);
-	tdb_unlock_free_bucket(tdb, b_off);
-	return ecode;
-}
-
-static size_t adjust_size(size_t keylen, size_t datalen)
-{
-	size_t size = keylen + datalen;
-
-	if (size < TDB_MIN_DATA_LEN)
-		size = TDB_MIN_DATA_LEN;
-
-	/* Round to next uint64_t boundary. */
-	return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
-}
+	/* It's time to coalesce if counter wrapped. */
+	if (*coalesce)
+		*coalesce = ((head & ~TDB_OFF_MASK) == 0);
 
-/* If we have enough left over to be useful, split that off. */
-static size_t record_leftover(size_t keylen, size_t datalen,
-			      bool want_extra, size_t total_len)
-{
-	ssize_t leftover;
-
-	if (want_extra)
-		datalen += datalen / 2;
-	leftover = total_len - adjust_size(keylen, datalen);
-
-	if (leftover < (ssize_t)sizeof(struct tdb_free_record))
-		return 0;
-
-	return leftover;
+	return tdb_write_convert(tdb, off, &new, sizeof(new));
 }
 
 static tdb_off_t ftable_offset(struct tdb_context *tdb, unsigned int ftable)
@@ -320,8 +287,8 @@
 	tdb_off_t off;
 	unsigned int i;
 
-	if (likely(tdb->ftable == ftable))
-		return tdb->ftable_off;
+	if (likely(tdb->tdb2.ftable == ftable))
+		return tdb->tdb2.ftable_off;
 
 	off = first_ftable(tdb);
 	for (i = 0; i < ftable; i++) {
@@ -333,10 +300,12 @@
 	return off;
 }
 
-/* Note: we unlock the current bucket if we coalesce (> 0) or fail (-ve). */
+/* Note: we unlock the current bucket if fail (-ve), or coalesce (+ve) and
+ * need to blatt the *protect record (which is set to an error). */
 static tdb_len_t coalesce(struct tdb_context *tdb,
 			  tdb_off_t off, tdb_off_t b_off,
-			  tdb_len_t data_len)
+			  tdb_len_t data_len,
+			  tdb_off_t *protect)
 {
 	tdb_off_t end;
 	struct tdb_free_record rec;
@@ -367,7 +336,7 @@
 		nb_off = ftable_offset(tdb, ftable);
 		if (TDB_OFF_IS_ERR(nb_off)) {
 			tdb_access_release(tdb, r);
-			ecode = nb_off;
+			ecode = TDB_OFF_TO_ERR(nb_off);
 			goto err;
 		}
 		nb_off = bucket_off(nb_off, bucket);
@@ -400,6 +369,12 @@
 			break;
 		}
 
+		/* Did we just mess up a record you were hoping to use? */
+		if (end == *protect) {
+			tdb->stats.alloc_coalesce_iterate_clash++;
+			*protect = TDB_ERR_TO_OFF(TDB_ERR_NOEXIST);
+		}
+
 		ecode = remove_from_list(tdb, nb_off, end, &rec);
 		check_list(tdb, nb_off);
 		if (ecode != TDB_SUCCESS) {
@@ -416,6 +391,12 @@
 	if (end == off + sizeof(struct tdb_used_record) + data_len)
 		return 0;
 
+	/* Before we expand, check this isn't one you wanted protected? */
+	if (off == *protect) {
+		*protect = TDB_ERR_TO_OFF(TDB_ERR_EXISTS);
+		tdb->stats.alloc_coalesce_iterate_clash++;
+	}
+
 	/* OK, expand initial record */
 	ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
 	if (ecode != TDB_SUCCESS) {
@@ -435,33 +416,229 @@
 		goto err;
 	}
 
-	/* We have to drop this to avoid deadlocks, so make sure record
-	 * doesn't get coalesced by someone else! */
-	rec.ftable_and_len = (TDB_FTABLE_NONE << (64 - TDB_OFF_UPPER_STEAL))
-		| (end - off - sizeof(struct tdb_used_record));
-	ecode = tdb_write_off(tdb, off + offsetof(struct tdb_free_record,
-						  ftable_and_len),
-			      rec.ftable_and_len);
+	/* Try locking violation first.  We don't allow coalesce recursion! */
+	ecode = add_free_record(tdb, off, end - off, TDB_LOCK_NOWAIT, false);
 	if (ecode != TDB_SUCCESS) {
-		goto err;
-	}
+		/* Need to drop lock.  Can't rely on anything stable. */
+		tdb->stats.alloc_coalesce_lockfail++;
+		*protect = TDB_ERR_TO_OFF(TDB_ERR_CORRUPT);
+
+		/* We have to drop this to avoid deadlocks, so make sure record
+		 * doesn't get coalesced by someone else! */
+		rec.ftable_and_len = (TDB_FTABLE_NONE
+				      << (64 - TDB_OFF_UPPER_STEAL))
+			| (end - off - sizeof(struct tdb_used_record));
+		ecode = tdb_write_off(tdb,
+				      off + offsetof(struct tdb_free_record,
+						     ftable_and_len),
+				      rec.ftable_and_len);
+		if (ecode != TDB_SUCCESS) {
+			goto err;
+		}
 
-	tdb->stats.alloc_coalesce_succeeded++;
-	tdb_unlock_free_bucket(tdb, b_off);
+		tdb_unlock_free_bucket(tdb, b_off);
 
-	ecode = add_free_record(tdb, off, end - off);
-	if (ecode != TDB_SUCCESS) {
-		return ecode;
+		ecode = add_free_record(tdb, off, end - off, TDB_LOCK_WAIT,
+					false);
+		if (ecode != TDB_SUCCESS) {
+			return TDB_ERR_TO_OFF(ecode);
+		}
+	} else if (TDB_OFF_IS_ERR(*protect)) {
+		/* For simplicity, we always drop lock if they can't continue */
+		tdb_unlock_free_bucket(tdb, b_off);
 	}
 
+	tdb->stats.alloc_coalesce_succeeded++;
+
 	/* Return usable length. */
 	return end - off - sizeof(struct tdb_used_record);
 
 err:
 	/* To unify error paths, we *always* unlock bucket on error. */
 	tdb_unlock_free_bucket(tdb, b_off);
+	return TDB_ERR_TO_OFF(ecode);
+}
+
+/* List is locked: we unlock it. */
+static enum TDB_ERROR coalesce_list(struct tdb_context *tdb,
+				    tdb_off_t ftable_off,
+				    tdb_off_t b_off,
+				    unsigned int limit)
+{
+	enum TDB_ERROR ecode;
+	tdb_off_t off;
+
+	off = tdb_read_off(tdb, b_off);
+	if (TDB_OFF_IS_ERR(off)) {
+		ecode = TDB_OFF_TO_ERR(off);
+		goto unlock_err;
+	}
+	/* A little bit of paranoia: counter should be 0. */
+	off &= TDB_OFF_MASK;
+
+	while (off && limit--) {
+		struct tdb_free_record rec;
+		tdb_len_t coal;
+		tdb_off_t next;
+
+		ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
+		if (ecode != TDB_SUCCESS)
+			goto unlock_err;
+
+		next = rec.next;
+		coal = coalesce(tdb, off, b_off, frec_len(&rec), &next);
+		if (TDB_OFF_IS_ERR(coal)) {
+			/* This has already unlocked on error. */
+			return TDB_OFF_TO_ERR(coal);
+		}
+		if (TDB_OFF_IS_ERR(next)) {
+			/* Coalescing had to unlock, so stop. */
+			return TDB_SUCCESS;
+		}
+		/* Keep going if we're doing well... */
+		limit += size_to_bucket(coal / 16 + TDB_MIN_DATA_LEN);
+		off = next;
+	}
+
+	/* Now, move those elements to the tail of the list so we get something
+	 * else next time. */
+	if (off) {
+		struct tdb_free_record oldhrec, newhrec, oldtrec, newtrec;
+		tdb_off_t oldhoff, oldtoff, newtoff;
+
+		/* The record we were up to is the new head. */
+		ecode = tdb_read_convert(tdb, off, &newhrec, sizeof(newhrec));
+		if (ecode != TDB_SUCCESS)
+			goto unlock_err;
+
+		/* Get the new tail. */
+		newtoff = frec_prev(&newhrec);
+		ecode = tdb_read_convert(tdb, newtoff, &newtrec,
+					 sizeof(newtrec));
+		if (ecode != TDB_SUCCESS)
+			goto unlock_err;
+
+		/* Get the old head. */
+		oldhoff = tdb_read_off(tdb, b_off);
+		if (TDB_OFF_IS_ERR(oldhoff)) {
+			ecode = TDB_OFF_TO_ERR(oldhoff);
+			goto unlock_err;
+		}
+
+		/* This could happen if they all coalesced away. */
+		if (oldhoff == off)
+			goto out;
+
+		ecode = tdb_read_convert(tdb, oldhoff, &oldhrec,
+					 sizeof(oldhrec));
+		if (ecode != TDB_SUCCESS)
+			goto unlock_err;
+
+		/* Get the old tail. */
+		oldtoff = frec_prev(&oldhrec);
+		ecode = tdb_read_convert(tdb, oldtoff, &oldtrec,
+					 sizeof(oldtrec));
+		if (ecode != TDB_SUCCESS)
+			goto unlock_err;
+
+		/* Old tail's next points to old head. */
+		oldtrec.next = oldhoff;
+
+		/* Old head's prev points to old tail. */
+		oldhrec.magic_and_prev
+			= (TDB_FREE_MAGIC << (64 - TDB_OFF_UPPER_STEAL))
+			| oldtoff;
+
+		/* New tail's next is 0. */
+		newtrec.next = 0;
+
+		/* Write out the modified versions. */
+		ecode = tdb_write_convert(tdb, oldtoff, &oldtrec,
+					  sizeof(oldtrec));
+		if (ecode != TDB_SUCCESS)
+			goto unlock_err;
+
+		ecode = tdb_write_convert(tdb, oldhoff, &oldhrec,
+					  sizeof(oldhrec));
+		if (ecode != TDB_SUCCESS)
+			goto unlock_err;
+
+		ecode = tdb_write_convert(tdb, newtoff, &newtrec,
+					  sizeof(newtrec));
+		if (ecode != TDB_SUCCESS)
+			goto unlock_err;
+
+		/* And finally link in new head. */
+		ecode = tdb_write_off(tdb, b_off, off);
+		if (ecode != TDB_SUCCESS)
+			goto unlock_err;
+	}
+out:
+	tdb_unlock_free_bucket(tdb, b_off);
+	return TDB_SUCCESS;
+
+unlock_err:
+	tdb_unlock_free_bucket(tdb, b_off);
+	return ecode;
+}
+
+/* List must not be locked if coalesce_ok is set. */
+enum TDB_ERROR add_free_record(struct tdb_context *tdb,
+			       tdb_off_t off, tdb_len_t len_with_header,
+			       enum tdb_lock_flags waitflag,
+			       bool coalesce)
+{
+	tdb_off_t b_off;
+	tdb_len_t len;
+	enum TDB_ERROR ecode;
+
+	assert(len_with_header >= sizeof(struct tdb_free_record));
+
+	len = len_with_header - sizeof(struct tdb_used_record);
+
+	b_off = bucket_off(tdb->tdb2.ftable_off, size_to_bucket(len));
+	ecode = tdb_lock_free_bucket(tdb, b_off, waitflag);
+	if (ecode != TDB_SUCCESS) {
+		return ecode;
+	}
+
+	ecode = enqueue_in_free(tdb, b_off, off, len, &coalesce);
+	check_list(tdb, b_off);
+
+	/* Coalescing unlocks free list. */
+	if (!ecode && coalesce)
+		ecode = coalesce_list(tdb, tdb->tdb2.ftable_off, b_off, 2);
+	else
+		tdb_unlock_free_bucket(tdb, b_off);
 	return ecode;
 }
 
+static size_t adjust_size(size_t keylen, size_t datalen)
+{
+	size_t size = keylen + datalen;
+
+	if (size < TDB_MIN_DATA_LEN)
+		size = TDB_MIN_DATA_LEN;
+
+	/* Round to next uint64_t boundary. */
+	return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
+}
+
+/* If we have enough left over to be useful, split that off. */
+static size_t record_leftover(size_t keylen, size_t datalen,
+			      bool want_extra, size_t total_len)
+{
+	ssize_t leftover;
+
+	if (want_extra)
+		datalen += datalen / 2;
+	leftover = total_len - adjust_size(keylen, datalen);
+
+	if (leftover < (ssize_t)sizeof(struct tdb_free_record))
+		return 0;
+
+	return leftover;
+}
+
 /* We need size bytes to put our key and data in. */
 static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
 				tdb_off_t ftable_off,
@@ -478,14 +655,13 @@
 	enum TDB_ERROR ecode;
 
 	tdb->stats.allocs++;
-again:
 	b_off = bucket_off(ftable_off, bucket);
 
 	/* FIXME: Try non-blocking wait first, to measure contention. */
 	/* Lock this bucket. */
 	ecode = tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT);
 	if (ecode != TDB_SUCCESS) {
-		return ecode;
+		return TDB_ERR_TO_OFF(ecode);
 	}
 
 	best.ftable_and_len = -1ULL;
@@ -501,13 +677,14 @@ again:
 	 * as we go. */
 	off = tdb_read_off(tdb, b_off);
 	if (TDB_OFF_IS_ERR(off)) {
-		ecode = off;
+		ecode = TDB_OFF_TO_ERR(off);
 		goto unlock_err;
 	}
+	off &= TDB_OFF_MASK;
 
 	while (off) {
 		const struct tdb_free_record *r;
-		tdb_len_t len, coal;
+		tdb_len_t len;
 		tdb_off_t next;
 
 		r = tdb_access_read(tdb, off, sizeof(*r), true);
@@ -541,17 +718,6 @@
 		next = r->next;
 		len = frec_len(r);
 		tdb_access_release(tdb, r);
-
-		/* Since we're going slow anyway, try coalescing here. */
-		coal = coalesce(tdb, off, b_off, len);
-		if (TDB_OFF_IS_ERR(coal)) {
-			/* This has already unlocked on error. */
-			return coal;
-		}
-		if (coal > 0) {
-			/* This has unlocked list, restart. */
-			goto again;
-		}
 		off = next;
 	}
 
@@ -586,7 +752,7 @@
 
 	/* For futureproofing, we put a 0 in any unused space. */
 	if (rec_extra_padding(&rec)) {
-		ecode = tdb->methods->twrite(tdb, best_off + sizeof(rec)
+		ecode = tdb->tdb2.io->twrite(tdb, best_off + sizeof(rec)
 					     + keylen + datalen, "", 1);
 		if (ecode != TDB_SUCCESS) {
 			goto unlock_err;
@@ -600,9 +766,9 @@
 		ecode = add_free_record(tdb,
 					best_off + sizeof(rec)
 					+ frec_len(&best) - leftover,
-					leftover);
+					leftover, TDB_LOCK_WAIT, false);
 		if (ecode != TDB_SUCCESS) {
-			best_off = ecode;
+			best_off = TDB_ERR_TO_OFF(ecode);
 		}
 	}
 	tdb_unlock_free_bucket(tdb, b_off);
@@ -615,7 +781,7 @@
 
 unlock_err:
 	tdb_unlock_free_bucket(tdb, b_off);
-	return ecode;
+	return TDB_ERR_TO_OFF(ecode);
 }
 
 /* Get a free block from current free list, or 0 if none, -ve on error. */
@@ -634,9 +800,9 @@
 	else
 		start_b = size_to_bucket(adjust_size(keylen, datalen));
 
-	ftable_off = tdb->ftable_off;
-	ftable = tdb->ftable;
-	while (!wrapped || ftable_off != tdb->ftable_off) {
+	ftable_off = tdb->tdb2.ftable_off;
+	ftable = tdb->tdb2.ftable;
+	while (!wrapped || ftable_off != tdb->tdb2.ftable_off) {
 		/* Start at exact size bucket, and search up... */
 		for (b = find_free_head(tdb, ftable_off, start_b);
 		     b < TDB_FREE_BUCKETS;
@@ -653,8 +819,8 @@
 			if (b == TDB_FREE_BUCKETS - 1)
 				tdb->stats.alloc_bucket_max++;
 			/* Worked?  Stay using this list. */
-			tdb->ftable_off = ftable_off;
-			tdb->ftable = ftable;
+			tdb->tdb2.ftable_off = ftable_off;
+			tdb->tdb2.ftable = ftable;
 			return off;
 		}
 		/* Didn't work.  Try next bucket. */
@@ -710,10 +876,38 @@
 	return TDB_SUCCESS;
 }
 
+/* You need 'size', this tells you how much you should expand by. */
+tdb_off_t tdb_expand_adjust(tdb_off_t map_size, tdb_off_t size)
+{
+	tdb_off_t new_size, top_size;
+
+	/* limit size in order to avoid using up huge amounts of memory for
+	 * in memory tdbs if an oddball huge record creeps in */
+	if (size > 100 * 1024) {
+		top_size = map_size + size * 2;
+	} else {
+		top_size = map_size + size * 100;
+	}
+
+	/* always make room for at least top_size more records, and at
+	   least 25% more space. if the DB is smaller than 100MiB,
+	   otherwise grow it by 10% only. */
+	if (map_size > 100 * 1024 * 1024) {
+		new_size = map_size * 1.10;
+	} else {
+		new_size = map_size * 1.25;
+	}
+
+	/* Round the database up to a multiple of the page size */
+	if (new_size < top_size)
+		new_size = top_size;
+	return new_size - map_size;
+}
+
 /* Expand the database. */
 static enum TDB_ERROR tdb_expand(struct tdb_context *tdb, tdb_len_t size)
 {
-	uint64_t old_size, rec_size, map_size;
+	uint64_t old_size;
 	tdb_len_t wanted;
 	enum TDB_ERROR ecode;
 
@@ -732,39 +926,18 @@
 
 	/* Someone else may have expanded the file, so retry. */
 	old_size = tdb->file->map_size;
-	tdb->methods->oob(tdb, tdb->file->map_size + 1, true);
+	tdb->tdb2.io->oob(tdb, tdb->file->map_size, 1, true);
 	if (tdb->file->map_size != old_size) {
 		tdb_unlock_expand(tdb, F_WRLCK);
 		return TDB_SUCCESS;
 	}
 
-	/* limit size in order to avoid using up huge amounts of memory for
-	 * in memory tdbs if an oddball huge record creeps in */
-	if (size > 100 * 1024) {
-		rec_size = size * 2;
-	} else {
-		rec_size = size * 100;
-	}
-
-	/* always make room for at least rec_size more records, and at
-	   least 25% more space. if the DB is smaller than 100MiB,
-	   otherwise grow it by 10% only. */
-	if (old_size > 100 * 1024 * 1024) {
-		map_size = old_size / 10;
-	} else {
-		map_size = old_size / 4;
-	}
-
-	if (map_size > rec_size) {
-		wanted = map_size;
-	} else {
-		wanted = rec_size;
-	}
-
+	/* Overallocate. */
+	wanted = tdb_expand_adjust(old_size, size);
 	/* We need room for the record header too. */
 	wanted = adjust_size(0, sizeof(struct tdb_used_record) + wanted);
 
-	ecode = tdb->methods->expand_file(tdb, wanted);
+	ecode = tdb->tdb2.io->expand_file(tdb, wanted);
 	if (ecode != TDB_SUCCESS) {
 		tdb_unlock_expand(tdb, F_WRLCK);
 		return ecode;
@@ -774,7 +947,7 @@
 	tdb_unlock_expand(tdb, F_WRLCK);
 
 	tdb->stats.expands++;
-	return add_free_record(tdb, old_size, wanted);
+	return add_free_record(tdb, old_size, wanted, TDB_LOCK_WAIT, true);
 }
 
 /* This won't fail: it will expand the database if it has to. */
@@ -784,7 +957,7 @@ tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
 	tdb_off_t off;
 
 	/* We can't hold pointers during this: we could unmap! */
-	assert(!tdb->direct_access);
+	assert(!tdb->tdb2.direct_access);
 
 	for (;;) {
 		enum TDB_ERROR ecode;
@@ -794,7 +967,7 @@
 
 		ecode = tdb_expand(tdb, adjust_size(keylen, datalen));
 		if (ecode != TDB_SUCCESS) {
-			return ecode;
+			return TDB_ERR_TO_OFF(ecode);
 		}
 	}
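Three self-contained sketches follow. They illustrate the mechanisms in the patch above but are not code from the patch; any constant, macro definition, or helper they introduce is an assumption made for illustration.

First, the bucket-head encoding that enqueue_in_free() now maintains. The patch steals the top TDB_OFF_UPPER_STEAL bits of each free-bucket head word for a "records enqueued since last coalesce" counter, and keeps bit 63 clear so that TDB_OFF_IS_ERR() can still distinguish a healthy head word from an error value. A minimal standalone sketch, assuming an 8-bit steal width (tdb2 defines the real constant itself):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define OFF_UPPER_STEAL 8				/* assumed width */
#define OFF_MASK ((1ULL << (64 - OFF_UPPER_STEAL)) - 1)	/* low 56 bits */
#define COUNTER_ONE (1ULL << (64 - OFF_UPPER_STEAL))	/* counter LSB */
#define ERR_BIT (1ULL << 63)				/* never set */

/* Mirrors the head update in enqueue_in_free(): bump the counter,
 * force bit 63 clear, then splice in the new head offset. */
static uint64_t bump_head(uint64_t head, uint64_t new_off)
{
	head += COUNTER_ONE;
	head &= ~(OFF_MASK | ERR_BIT);
	return head | new_off;
}

int main(void)
{
	uint64_t head = 0;
	unsigned int i;

	/* With 8 stolen bits minus the reserved error bit, the counter
	 * has 7 usable bits: after 128 enqueues it wraps to zero, which
	 * is exactly the patch's "time to coalesce" test,
	 * (head & ~TDB_OFF_MASK) == 0. */
	for (i = 0; i < 128; i++)
		head = bump_head(head, 0x1000 + i);

	assert((head & ~OFF_MASK) == 0);
	printf("head offset %#llx, counter wrapped after 128 enqueues\n",
	       (unsigned long long)(head & OFF_MASK));
	return 0;
}

This is why check_list(), lock_and_alloc() and coalesce_list() all mask the head with TDB_OFF_MASK before following it: the raw word is no longer a plain offset.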
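Second, the off/error convention behind all the TDB_ERR_TO_OFF() and TDB_OFF_TO_ERR() churn in this patch. Functions returning tdb_off_t smuggle the negative enum TDB_ERROR values through the same 64-bit return: a negative enum sign-extends so bit 63 is set, while real offsets never reach it. The macro definitions and enum values below are assumptions for illustration, not tdb2's actual headers; only the round-trip idea is taken from the patch:

#include <assert.h>
#include <stdint.h>

typedef uint64_t tdb_off_t;

enum TDB_ERROR {
	TDB_SUCCESS = 0,
	TDB_ERR_CORRUPT = -4,	/* illustrative value */
	TDB_ERR_NOEXIST = -6	/* illustrative value */
};

#define TDB_OFF_IS_ERR(off)	(((off) & (1ULL << 63)) != 0)
#define TDB_ERR_TO_OFF(ecode)	((tdb_off_t)(int64_t)(ecode))
#define TDB_OFF_TO_ERR(off)	((enum TDB_ERROR)(int64_t)(off))

int main(void)
{
	/* An error squeezed into an offset return value... */
	tdb_off_t off = TDB_ERR_TO_OFF(TDB_ERR_NOEXIST);

	assert(TDB_OFF_IS_ERR(off));
	/* ...converts back to the same error code. */
	assert(TDB_OFF_TO_ERR(off) == TDB_ERR_NOEXIST);

	/* Ordinary offsets are untouched by the test. */
	assert(!TDB_OFF_IS_ERR((tdb_off_t)4096));
	return 0;
}

This is also why the enqueue counter above must never carry into bit 63: a counter overflowing into the error bit would make a healthy bucket head look like an error return. The patch additionally reuses the convention for the *protect argument to coalesce(), storing TDB_ERR_TO_OFF(TDB_ERR_NOEXIST) or TDB_ERR_TO_OFF(TDB_ERR_EXISTS) to tell the caller its record was consumed.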
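Third, the growth policy that the new tdb_expand_adjust() factors out of tdb_expand(). The sketch below is a standalone copy of the function from the @@ -710 hunk (tdb_off_t approximated as uint64_t) so the policy can be exercised in isolation; the printed numbers are worked examples, not output from tdb2 itself. A 1MiB map asked to fit 1KiB grows by 25% (256KiB), since that exceeds top_size of 100 times the request; a 1GiB map grows by only 10%:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t tdb_off_t;

static tdb_off_t tdb_expand_adjust(tdb_off_t map_size, tdb_off_t size)
{
	tdb_off_t new_size, top_size;

	/* Large requests get 2x headroom, small ones 100x. */
	if (size > 100 * 1024)
		top_size = map_size + size * 2;
	else
		top_size = map_size + size * 100;

	/* 25% growth below 100MiB, 10% above. */
	if (map_size > 100 * 1024 * 1024)
		new_size = map_size * 1.10;
	else
		new_size = map_size * 1.25;

	if (new_size < top_size)
		new_size = top_size;
	return new_size - map_size;
}

int main(void)
{
	/* 1MiB map, 1KiB record: grows by 262144 bytes (256KiB). */
	printf("%llu\n", (unsigned long long)
	       tdb_expand_adjust(1 << 20, 1024));

	/* 1GiB map, 1KiB record: grows by about 10% of the map. */
	printf("%llu\n", (unsigned long long)
	       tdb_expand_adjust(1ULL << 30, 1024));
	return 0;
}

Note the trade-off the patch locks in by calling add_free_record(..., TDB_LOCK_WAIT, true) from tdb_expand(): expansion both overallocates and immediately makes the fresh space a coalesce candidate, so repeated small expands tend to merge into one large free record instead of fragmenting.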