#include <ccan/tdb2/tdb2.h>
#include <ccan/hash/hash.h>
#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>

struct tdb_data tdb_null = { .dptr = NULL, .dsize = 0 };

/* all contexts, to ensure no double-opens (fcntl locks don't nest!) */
static struct tdb_context *tdbs = NULL;
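
/* A quick illustration of the problem (a sketch, not from the original
 * source): POSIX fcntl locks belong to the process, not to a particular
 * file descriptor, and closing *any* descriptor for a file drops every
 * lock the process holds on it:
 *
 *	int fd1 = open("foo.tdb", O_RDWR);
 *	int fd2 = open("foo.tdb", O_RDWR);
 *	// ... take an fcntl write lock via fd1 ...
 *	close(fd2);	// silently drops fd1's lock too!
 *
 * Hence every open context is kept on the tdbs list, so a second open
 * of the same device/inode within one process can be refused. */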

PRINTF_ATTRIBUTE(4, 5) static void
null_log_fn(struct tdb_context *tdb,
	    enum tdb_debug_level level, void *priv,
	    const char *fmt, ...)
{
}

/* We do a lot of work assuming our copy of the header volatile area
 * is up to date, and usually it is.  However, once we grab a lock, we
 * have to re-check it. */
bool update_header(struct tdb_context *tdb)
	struct tdb_header_volatile pad, *v;

	if (tdb->header_uptodate) {
		tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
			 "warning: header uptodate already\n");

	/* We could get a partial update if we're not holding any locks. */
	assert(tdb_has_locks(tdb));

	v = tdb_get(tdb, offsetof(struct tdb_header, v), &pad, sizeof(*v));
	if (!v) {
		/* On failure, imply we updated header so they retry. */
		return true;
	}
	tdb->header_uptodate = true;
	if (likely(memcmp(&tdb->header.v, v, sizeof(*v)) == 0)) {
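
/* The caller's pattern (see find_bucket_and_lock below): take a list
 * lock first, then call update_header().  A true return means the
 * cached volatile area changed underneath us (or could not be read),
 * so anything derived from it, such as the hash mask, must be
 * recomputed before use. */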

static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
	return hash64_stable((const unsigned char *)key, length, seed);

uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
	return tdb->khash(ptr, len, tdb->header.hash_seed, tdb->hash_priv);

static bool tdb_already_open(dev_t device, ino_t ino)
	struct tdb_context *i;

	for (i = tdbs; i; i = i->next) {
		if (i->device == device && i->inode == ino) {

static uint64_t random_number(struct tdb_context *tdb)
	fd = open("/dev/urandom", O_RDONLY);
	if (tdb_read_all(fd, &ret, sizeof(ret))) {
		tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
			 "tdb_open: random from /dev/urandom\n");

	/* FIXME: Untested! Based on Wikipedia protocol description! */
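	/* Per that description: command byte 1 asks the entropy daemon
	 * for a non-blocking read; the reply's first byte says how many
	 * random bytes actually follow, which may be fewer than asked. */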
	fd = open("/dev/egd-pool", O_RDWR);
		/* Command is 1, next byte is size we want to read. */
		char cmd[2] = { 1, sizeof(uint64_t) };
		if (write(fd, cmd, sizeof(cmd)) == sizeof(cmd)) {
			char reply[1 + sizeof(uint64_t)];
			int r = read(fd, reply, sizeof(reply));
				tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
					 "tdb_open: %u random bytes from"
					 " /dev/egd-pool\n", r-1);
				/* Copy at least some bytes. */
				memcpy(&ret, reply+1, r - 1);
				if (reply[0] == sizeof(uint64_t)
				    && r == sizeof(reply)) {

	/* Fallback: pid and time. */
	gettimeofday(&now, NULL);
	ret = getpid() * 100132289ULL + now.tv_sec * 1000000ULL + now.tv_usec;
	tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
		 "tdb_open: random from getpid and time\n");

struct new_database {
	struct tdb_header hdr;
	struct tdb_used_record hrec;
	tdb_off_t hash[1ULL << INITIAL_HASH_BITS];
	struct tdb_used_record frec;
	tdb_off_t free[INITIAL_FREE_BUCKETS + 1]; /* One overflow bucket */
};
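
/* So a freshly-created file laid out from this template looks like
 * (offsets increasing downwards):
 *
 *	hdr	magic string, version, hash seed/test, volatile area
 *	hrec	used-record header covering the hash array
 *	hash	1 << INITIAL_HASH_BITS buckets, all zero (empty)
 *	frec	used-record header covering the free array
 *	free	INITIAL_FREE_BUCKETS + 1 free lists, all zero (empty)
 */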

/* initialise a new database */
static int tdb_new_database(struct tdb_context *tdb)
	/* We make it up in memory, then write it out if not internal */
	struct new_database newdb;
	unsigned int magic_off = offsetof(struct tdb_header, magic_food);

	/* Fill in the header */
	newdb.hdr.version = TDB_VERSION;
	newdb.hdr.hash_seed = random_number(tdb);
	newdb.hdr.hash_test = TDB_HASH_MAGIC;
	newdb.hdr.hash_test = tdb->khash(&newdb.hdr.hash_test,
					 sizeof(newdb.hdr.hash_test),
					 newdb.hdr.hash_seed,
					 tdb->hash_priv);
	newdb.hdr.v.generation = 0;

	/* The initial zone must cover the initial database size! */
	BUILD_ASSERT((1ULL << INITIAL_ZONE_BITS) >= sizeof(newdb));

	/* Free array has 1 zone, 10 buckets. All buckets empty. */
	newdb.hdr.v.num_zones = 1;
	newdb.hdr.v.zone_bits = INITIAL_ZONE_BITS;
	newdb.hdr.v.free_buckets = INITIAL_FREE_BUCKETS;
	newdb.hdr.v.free_off = offsetof(struct new_database, free);
	set_header(tdb, &newdb.frec, 0,
		   sizeof(newdb.free), sizeof(newdb.free), 0);
	memset(newdb.free, 0, sizeof(newdb.free));

	/* Initial hashes are empty. */
	newdb.hdr.v.hash_bits = INITIAL_HASH_BITS;
	newdb.hdr.v.hash_off = offsetof(struct new_database, hash);
	set_header(tdb, &newdb.hrec, 0,
		   sizeof(newdb.hash), sizeof(newdb.hash), 0);
	memset(newdb.hash, 0, sizeof(newdb.hash));

	memset(newdb.hdr.magic_food, 0, sizeof(newdb.hdr.magic_food));
	strcpy(newdb.hdr.magic_food, TDB_MAGIC_FOOD);

	/* This creates an endian-converted database, as if read from disk */
	tdb_convert(tdb,
		    (char *)&newdb.hdr + magic_off,
		    sizeof(newdb) - magic_off);

	tdb->header = newdb.hdr;

	if (tdb->flags & TDB_INTERNAL) {
		tdb->map_size = sizeof(newdb);
		tdb->map_ptr = malloc(tdb->map_size);
			tdb->ecode = TDB_ERR_OOM;
		memcpy(tdb->map_ptr, &newdb, tdb->map_size);

	if (lseek(tdb->fd, 0, SEEK_SET) == -1)
	if (ftruncate(tdb->fd, 0) == -1)

	if (!tdb_pwrite_all(tdb->fd, &newdb, sizeof(newdb), 0)) {
		tdb->ecode = TDB_ERR_IO;
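
/* A note on the convention used by these calls (inferred from their use
 * here and in tdb_open below): tdb_convert() byte-swaps the given
 * buffer, e.g. with bswap_64() per word, when TDB_CONVERT is set, and
 * is a no-op otherwise, so in-memory values are always native-endian. */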

struct tdb_context *tdb_open(const char *name, int tdb_flags,
			     int open_flags, mode_t mode,
			     union tdb_attribute *attr)
	struct tdb_context *tdb;

	if (!(tdb = (struct tdb_context *)calloc(1, sizeof *tdb))) {

	tdb->flags = tdb_flags;
	tdb->log = null_log_fn;
	tdb->log_priv = NULL;
	tdb->khash = jenkins_hash;
	tdb->hash_priv = NULL;

		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: attributes not yet supported\n");

	if ((open_flags & O_ACCMODE) == O_WRONLY) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: can't open tdb %s write-only\n", name);

	if ((open_flags & O_ACCMODE) == O_RDONLY) {
		/* read only databases don't do locking */
		tdb->flags |= TDB_NOLOCK;

	/* internal databases don't mmap or lock */
	if (tdb->flags & TDB_INTERNAL) {
		tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
		if (tdb_new_database(tdb) != 0) {
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: tdb_new_database failed!");
		TEST_IT(tdb->flags & TDB_CONVERT);
		tdb_convert(tdb, &tdb->header, sizeof(tdb->header));

	if ((tdb->fd = open(name, open_flags, mode)) == -1) {
		tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
			 "tdb_open: could not open file %s: %s\n",
			 name, strerror(errno));
		goto fail;	/* errno set by open(2) */

	/* on exec, don't inherit the fd */
	v = fcntl(tdb->fd, F_GETFD, 0);
	fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC);

	/* ensure there is only one process initialising at once */
	if (tdb_lock_open(tdb) == -1) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: failed to get open lock on %s: %s\n",
			 name, strerror(errno));
		goto fail;	/* errno set by tdb_brlock */

	if (!tdb_pread_all(tdb->fd, &tdb->header, sizeof(tdb->header), 0)
	    || strcmp(tdb->header.magic_food, TDB_MAGIC_FOOD) != 0) {
		if (!(open_flags & O_CREAT) || tdb_new_database(tdb) == -1) {
			errno = EIO; /* i.e. bad format or something */
	} else if (tdb->header.version != TDB_VERSION) {
		if (tdb->header.version == bswap_64(TDB_VERSION))
			tdb->flags |= TDB_CONVERT;
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: %s is unknown version 0x%llx\n",
			 name, (long long)tdb->header.version);

	tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
	hash_test = TDB_HASH_MAGIC;
	hash_test = tdb->khash(&hash_test, sizeof(hash_test),
			       tdb->header.hash_seed, tdb->hash_priv);
	if (tdb->header.hash_test != hash_test) {
		/* wrong hash variant */
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: %s uses a different hash function\n",

	if (fstat(tdb->fd, &st) == -1)

	/* Is it already in the open list? If so, fail. */
	if (tdb_already_open(st.st_dev, st.st_ino)) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: %s (%d,%d) is already open in this process\n",
			 name, (int)st.st_dev, (int)st.st_ino);

	tdb->name = strdup(name);
	tdb->map_size = st.st_size;
	tdb->device = st.st_dev;
	tdb->inode = st.st_ino;

	/* Internal (memory-only) databases skip all the disk-file code
	 * above and resume here: release the open lock and hook into
	 * the active list. */
	tdb_unlock_open(tdb);
	tdb->last_zone = random_free_zone(tdb);

	if (tdb->flags & TDB_INTERNAL) {
	free((char *)tdb->name);
	if (close(tdb->fd) != 0)
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: failed to close tdb->fd"

static int tdb_key_compare(TDB_DATA key, TDB_DATA data, void *private_data)
	return memcmp(data.dptr, key.dptr, data.dsize) == 0;

static void unlock_lists(struct tdb_context *tdb,
			 uint64_t start, uint64_t end, int ltype)
		tdb_unlock_list(tdb, start, ltype);
		start = (start + ((1ULL << tdb->header.v.hash_bits) - 1))
			& ((1ULL << tdb->header.v.hash_bits) - 1);
	} while (start != end);

/* FIXME: Return header copy? */
/* Returns -1 or offset of entry (0 if not found).
 * Locks hash entries from *start to *end (where the entry was found). */
static tdb_off_t find_bucket_and_lock(struct tdb_context *tdb,
				      const struct tdb_data *key,
	/* hash_bits might be out of date... */
	*start = *end = hash & ((1ULL << tdb->header.v.hash_bits) - 1);
	hextra = hash >> tdb->header.v.hash_bits;

	/* FIXME: can we avoid locks for some fast paths? */
	if (tdb_lock_list(tdb, *end, ltype, TDB_LOCK_WAIT) == -1)

	/* We only need to check this for first lock. */
	if (unlikely(update_header(tdb))) {
		tdb_unlock_list(tdb, *end, ltype);

	while ((off = tdb_read_off(tdb, tdb->header.v.hash_off
				   + *end * sizeof(tdb_off_t)))
		struct tdb_used_record pad, *r;
		uint64_t keylen, next;

		/* Didn't find it? */
#if 0 /* FIXME: Check other bits. */
		unsigned int bits, bitmask, hoffextra;
		/* Bottom three bits show how many extra hash bits. */
		bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1;
		bitmask = (1 << bits)-1;
		hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask);
		if ((hextra & bitmask) != hoffextra)
#endif

		r = tdb_get(tdb, off, &pad, sizeof(*r));
		if (rec_magic(r) != TDB_MAGIC) {
			tdb->ecode = TDB_ERR_CORRUPT;
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "find_bucket_and_lock: bad magic 0x%llx"
				 " at offset %llu!\n",
				 (long long)rec_magic(r), (long long)off);

		/* FIXME: check extra bits in header! */
		keylen = rec_key_length(r);
		if (keylen != key->dsize)

		switch (tdb_parse_data(tdb, *key, off + sizeof(*r), key->dsize,
				       tdb_key_compare, NULL)) {
			*room = rec_data_length(r) + rec_extra_padding(r);
			return off >> TDB_EXTRA_HASHBITS_NUM;

		/* Lock next bucket. */
		/* FIXME: We can deadlock if this wraps! */
		next = (*end + 1) & ((1ULL << tdb->header.v.hash_bits) - 1);
		if (next == *start) {
			tdb->ecode = TDB_ERR_CORRUPT;
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "find_bucket_and_lock: full hash table!\n");
		if (tdb_lock_list(tdb, next, ltype, TDB_LOCK_WAIT) == -1)

	TEST_IT(*end < *start);
	unlock_lists(tdb, *start, *end, ltype);
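
/* A worked example of the probing above, with made-up numbers: with
 * hash_bits = 3 there are 8 buckets.  A key whose hash is 0x2e starts
 * at bucket 0x2e & 7 = 6, and hextra = 0x2e >> 3 keeps the remaining
 * bits for cheap mismatch tests.  The search locks bucket 6, then 7,
 * then wraps to 0, advancing one locked list at a time until it hits
 * either the key or a zero (empty) bucket. */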

static int update_rec_hdr(struct tdb_context *tdb,
	struct tdb_used_record rec;

	if (set_header(tdb, &rec, keylen, datalen, room - datalen, h))

	return tdb_write_convert(tdb, off, &rec, sizeof(rec));

/* If we fail, others will try after us. */
static void enlarge_hash(struct tdb_context *tdb)
	uint64_t h, num = 1ULL << tdb->header.v.hash_bits;
	struct tdb_used_record pad, *r;

	/* FIXME: We should do this without holding locks throughout. */
	if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)

	if (unlikely(update_header(tdb))) {
		/* Someone else enlarged for us? Nothing to do. */
		if ((1ULL << tdb->header.v.hash_bits) != num)

	newoff = alloc(tdb, 0, num * 2, 0, false);
	if (unlikely(newoff == TDB_OFF_ERR))
	if (unlikely(newoff == 0)) {
		if (tdb_expand(tdb, 0, num * 2, false) == -1)
		newoff = alloc(tdb, 0, num * 2, 0, false);
		if (newoff == TDB_OFF_ERR || newoff == 0)

	/* FIXME: If the space before is empty, we know this is in its ideal
	 * location. We can steal a bit from the pointer to avoid rehash. */
	for (i = tdb_find_nonzero_off(tdb, tdb->header.v.hash_off, num);
	     i < num;
	     i += tdb_find_nonzero_off(tdb, tdb->header.v.hash_off
				       + i*sizeof(tdb_off_t), num - i)) {
		off = tdb_read_off(tdb, tdb->header.v.hash_off
				   + i*sizeof(tdb_off_t));
		if (unlikely(off == TDB_OFF_ERR))
		if (unlikely(!off)) {
			tdb->ecode = TDB_ERR_CORRUPT;
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
545 "find_bucket_and_lock: zero hash bucket!\n");

		h = hash_record(tdb, off);
		/* FIXME: Encode extra hash bits! */
		if (tdb_write_off(tdb, newoff
				  + (h & ((num * 2) - 1)) * sizeof(uint64_t),

	/* Free up old hash. */
	r = tdb_get(tdb, tdb->header.v.hash_off, &pad, sizeof(*r));
	add_free_record(tdb, tdb->header.v.hash_off,
			rec_data_length(r) + rec_extra_padding(r));

	/* Now we write the modified header. */
	tdb->header.v.generation++;
	tdb->header.v.hash_bits++;
	tdb->header.v.hash_off = newoff;
	tdb_write_convert(tdb, offsetof(struct tdb_header, v),
			  &tdb->header.v, sizeof(tdb->header.v));

	tdb_allrecord_unlock(tdb, F_WRLCK);
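
/* Bumping v.generation is what makes the move visible: the next
 * update_header() in any other process sees a changed volatile area,
 * returns true, and the caller re-reads hash_off and hash_bits before
 * probing again. */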

int tdb_store(struct tdb_context *tdb,
	      struct tdb_data key, struct tdb_data dbuf, int flag)
	tdb_off_t new_off, off, start, end, room;
	bool growing = false;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_WRLCK);
	if (off == TDB_OFF_ERR)

	/* Now we have lock on this hash bucket. */
	if (flag == TDB_INSERT) {
			tdb->ecode = TDB_ERR_EXISTS;

		if (room >= key.dsize + dbuf.dsize) {
			if (update_rec_hdr(tdb, off,
					   key.dsize, dbuf.dsize,

			/* FIXME: See if right record is free? */
			/* Hint to allocator that we've realloced. */

		if (flag == TDB_MODIFY) {
			/* if the record doesn't exist and we are in
			 * TDB_MODIFY mode then we should fail the store */
			tdb->ecode = TDB_ERR_NOEXIST;

	/* Allocate a new record. */
	new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
		unlock_lists(tdb, start, end, F_WRLCK);
		/* Expand, then try again... */
		if (tdb_expand(tdb, key.dsize, dbuf.dsize, growing) == -1)
		return tdb_store(tdb, key, dbuf, flag);

	/* We didn't like the existing one: remove it. */
		add_free_record(tdb, off, sizeof(struct tdb_used_record)

	off = tdb->header.v.hash_off + end * sizeof(tdb_off_t);
	/* FIXME: Encode extra hash bits! */
	if (tdb_write_off(tdb, off, new_off) == -1)

	off = new_off + sizeof(struct tdb_used_record);
	if (tdb->methods->write(tdb, off, key.dptr, key.dsize) == -1)
	if (tdb->methods->write(tdb, off, dbuf.dptr, dbuf.dsize) == -1)

	/* FIXME: tdb_increment_seqnum(tdb); */
	unlock_lists(tdb, start, end, F_WRLCK);

	/* By simple trial and error, this roughly approximates a 60%
	 * full table. */
	if (unlikely(end - start > 4 * tdb->header.v.hash_bits - 32))

	unlock_lists(tdb, start, end, F_WRLCK);

struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
	tdb_off_t off, start, end, room;
	struct tdb_used_record pad, *r;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_RDLCK);
	if (off == TDB_OFF_ERR)

		unlock_lists(tdb, start, end, F_RDLCK);
		tdb->ecode = TDB_SUCCESS;

	r = tdb_get(tdb, off, &pad, sizeof(*r));
		unlock_lists(tdb, start, end, F_RDLCK);

	ret.dsize = rec_data_length(r);
	ret.dptr = tdb_alloc_read(tdb, off + sizeof(*r) + key.dsize,
	unlock_lists(tdb, start, end, F_RDLCK);
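
/* A sketch of the store/fetch round trip (illustrative only, assuming
 * the usual TDB_REPLACE flag alongside TDB_INSERT/TDB_MODIFY):
 *
 *	struct tdb_data key = { .dptr = (unsigned char *)"hello", .dsize = 5 };
 *	struct tdb_data val = { .dptr = (unsigned char *)"world", .dsize = 5 };
 *	if (tdb_store(tdb, key, val, TDB_REPLACE) == 0) {
 *		struct tdb_data out = tdb_fetch(tdb, key);
 *		// out.dptr is NULL on failure, else allocated:
 *		free(out.dptr);
 *	}
 */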

static int hash_add(struct tdb_context *tdb, uint64_t h, tdb_off_t off)
	tdb_off_t i, hoff, len, num;

	i = (h & ((1ULL << tdb->header.v.hash_bits) - 1));
	hoff = tdb->header.v.hash_off + i * sizeof(tdb_off_t);
	len = (1ULL << tdb->header.v.hash_bits) - i;

	/* Look for next space. */
	num = tdb_find_zero_off(tdb, hoff, len);
	if (unlikely(num == len)) {
		hoff = tdb->header.v.hash_off;
		len = (1ULL << tdb->header.v.hash_bits);
		num = tdb_find_zero_off(tdb, hoff, len);

	/* FIXME: Encode extra hash bits! */
	return tdb_write_off(tdb, hoff + num * sizeof(tdb_off_t), off);
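
/* Example with made-up numbers: 8 buckets, ideal slot i = 6.  If
 * buckets 6 and 7 are taken, the first search (len = 2) finds no zero
 * and returns num == len, so the search restarts at bucket 0 and the
 * entry lands in the first free slot from the table's start. */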

static int unlink_used_record(struct tdb_context *tdb, tdb_off_t chain,
			      uint64_t *extra_locks)
	tdb_off_t num, len, i, hoff;

	/* FIXME: Maybe lock more in search? Maybe don't lock if scan */
	len = (1ULL << tdb->header.v.hash_bits) - (chain + 1);
	hoff = tdb->header.v.hash_off + (chain + 1) * sizeof(tdb_off_t);
	num = tdb_find_zero_off(tdb, hoff, len);

	/* We want to lock the zero entry, too. In the wrap case,
	 * this locks one extra. That's harmless. */
	for (i = chain + 1; i < chain + 1 + num; i++) {
		if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_WAIT) == -1) {
			unlock_lists(tdb, chain + 1, i-1, F_WRLCK);

	/* The wrap case: we need those locks out of order! */
	if (unlikely(num == len + 1)) {
		*extra_locks = tdb_find_zero_off(tdb, tdb->header.v.hash_off,
						 1ULL << tdb->header.v.hash_bits);
		for (i = 0; i < *extra_locks; i++) {
			if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_NOWAIT)) {
				/* Failed. Caller must lock in order. */
				unlock_lists(tdb, 0, i-1, F_WRLCK);
				unlock_lists(tdb, chain + 1, chain + num,

	/* Now we have the locks, be certain that offset is still 0! */
	hoff = tdb->header.v.hash_off
		+ (((chain + num) & ((1ULL << tdb->header.v.hash_bits) - 1))
		   * sizeof(tdb_off_t));	/* mask the index, then scale */

	if (unlikely(tdb_read_off(tdb, hoff) != 0)) {
		unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);

	/* OK, all locked. Unlink first one. */
	hoff = tdb->header.v.hash_off + chain * sizeof(tdb_off_t);
	if (tdb_write_off(tdb, hoff, 0) == -1)

	/* Rehash the rest. */
	for (i = 1; i < num; i++) {
		hoff = tdb->header.v.hash_off
			+ (((chain + i) & ((1ULL << tdb->header.v.hash_bits) - 1))
			   * sizeof(tdb_off_t));	/* mask index, then scale */
		off = tdb_read_off(tdb, hoff);
		if (unlikely(off == TDB_OFF_ERR))

		/* Maybe use a bit to indicate it is in ideal place? */
		h = hash_record(tdb, off);
		/* Is it happy where it is? */
		if ((h & ((1ULL << tdb->header.v.hash_bits)-1)) == (chain + i))

		if (tdb_write_off(tdb, hoff, 0) == -1)
		if (hash_add(tdb, h, off) == -1)

	unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);

	unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);
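
/* Why the rehash pass matters: lookups stop at the first zero bucket,
 * so simply zeroing an entry can break a probe chain.  Hypothetically,
 * with 8 buckets, say records A and B both hash to slot 6 and sit in
 * buckets 6 and 7.  Zeroing bucket 6 to delete A would make a later
 * lookup of B stop at the now-empty bucket 6 and miss it; re-adding
 * each record in the run after the hole puts B back where the probe
 * sequence can find it. */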

int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
	tdb_off_t off, start, end, room, extra_locks = 0;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_WRLCK);
	if (off == TDB_OFF_ERR)

		unlock_lists(tdb, start, end, F_WRLCK);
		tdb->ecode = TDB_ERR_NOEXIST;

	ret = unlink_used_record(tdb, end, &extra_locks);
	if (unlikely(ret == 1)) {
		unlock_lists(tdb, start, end, F_WRLCK);

		/* We need extra locks at the start. */
		for (i = 0; i < extra_locks; i++) {
			if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_WAIT)) {
				unlock_lists(tdb, 0, i-1, F_WRLCK);

		/* Try again now we're holding more locks. */
		ret = tdb_delete(tdb, key);
		unlock_lists(tdb, 0, i, F_WRLCK);

	unlock_lists(tdb, start, end, F_WRLCK);

int tdb_close(struct tdb_context *tdb)
	struct tdb_context **i;

	if (tdb->transaction) {
		tdb_transaction_cancel(tdb);

	tdb_trace(tdb, "tdb_close");

	if (tdb->flags & TDB_INTERNAL)
	free((char *)tdb->name);
	ret = close(tdb->fd);

	/* Remove from contexts list */
	for (i = &tdbs; *i; i = &(*i)->next) {
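
/* Iterating with a struct tdb_context ** means the matching context
 * can be unlinked without tracking a separate "previous" pointer:
 * once *i == tdb, removal is simply *i = tdb->next. */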