#include <ccan/tdb2/tdb2.h>
#include <ccan/hash/hash.h>
#include <ccan/likely/likely.h>
#include <assert.h>

struct tdb_data tdb_null = { .dptr = NULL, .dsize = 0 };

/* all contexts, to ensure no double-opens (fcntl locks don't nest!) */
static struct tdb_context *tdbs = NULL;

PRINTF_ATTRIBUTE(4, 5) static void
null_log_fn(struct tdb_context *tdb,
            enum tdb_debug_level level, void *priv,
            const char *fmt, ...)
{
}

/* We do a lot of work assuming our copy of the header volatile area
 * is up-to-date, and usually it is.  However, once we grab a lock, we have to
 * re-check it.  Returns true if our copy had to change. */
bool update_header(struct tdb_context *tdb)
{
        struct tdb_header_volatile pad, *v;

        if (tdb->header_uptodate) {
                tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
                         "warning: header uptodate already\n");
        }

        /* We could get a partial update if we're not holding any locks. */
        assert(tdb_has_locks(tdb));

        v = tdb_get(tdb, offsetof(struct tdb_header, v), &pad, sizeof(*v));
        /* On failure, imply we updated header so they retry. */
        if (!v)
                return true;

        tdb->header_uptodate = true;
        if (likely(memcmp(&tdb->header.v, v, sizeof(*v)) == 0)) {
                return false;
        }
        tdb->header.v = *v;
        return true;
}
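
/* Illustration (not code from this file): the idiom callers below use.
 * Grab a lock, then call update_header(); a true return means the cached
 * header was stale, so anything derived from it (such as a bucket number
 * computed from hash_bits) must be recomputed.  The caller shown here is
 * hypothetical:
 *
 *      again:
 *              if (tdb_lock_list(tdb, bucket, ltype, TDB_LOCK_WAIT) == -1)
 *                      return TDB_OFF_ERR;
 *              if (unlikely(update_header(tdb))) {
 *                      tdb_unlock_list(tdb, bucket, ltype);
 *                      bucket = hash & ((1ULL << tdb->header.v.hash_bits)-1);
 *                      goto again;
 *              }
 */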

static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
                             void *unused)
{
        return hash64_any(key, length, seed);
}

uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
{
        return tdb->khash(ptr, len, tdb->header.hash_seed, tdb->hash_priv);
}

static bool tdb_already_open(dev_t device, ino_t ino)
{
        struct tdb_context *i;

        for (i = tdbs; i; i = i->next) {
                if (i->device == device && i->inode == ino) {
                        return true;
                }
        }

        return false;
}

static uint64_t random_number(struct tdb_context *tdb)
{
        int fd;
        uint64_t ret = 0;
        struct timeval now;

        fd = open("/dev/urandom", O_RDONLY);
        if (fd >= 0) {
                if (read(fd, &ret, sizeof(ret)) == sizeof(ret)) {
                        tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
                                 "tdb_open: random from /dev/urandom\n");
                        close(fd);
                        return ret;
                }
                close(fd);
        }
        /* FIXME: Untested!  Based on Wikipedia protocol description! */
        fd = open("/dev/egd-pool", O_RDWR);
        if (fd >= 0) {
                /* Command is 1, next byte is size we want to read. */
                char cmd[2] = { 1, sizeof(uint64_t) };
                if (write(fd, cmd, sizeof(cmd)) == sizeof(cmd)) {
                        char reply[1 + sizeof(uint64_t)];
                        int r = read(fd, reply, sizeof(reply));
                        if (r > 1) {
                                tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
                                         "tdb_open: %u random bytes from"
                                         " /dev/egd-pool\n", r-1);
                                /* Copy at least some bytes. */
                                memcpy(&ret, reply+1, r - 1);
                                if (reply[0] == sizeof(uint64_t)
                                    && r == sizeof(reply)) {
                                        close(fd);
                                        return ret;
                                }
                        }
                }
                close(fd);
        }

        /* Fallback: pid and time. */
        gettimeofday(&now, NULL);
        ret = getpid() * 100132289ULL + now.tv_sec * 1000000ULL + now.tv_usec;
        tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
                 "tdb_open: random from getpid and time\n");
        return ret;
}

struct new_database {
        struct tdb_header hdr;
        struct tdb_used_record hrec;
        tdb_off_t hash[1ULL << INITIAL_HASH_BITS];
        struct tdb_used_record frec;
        tdb_off_t free[INITIAL_FREE_BUCKETS + 1]; /* One overflow bucket */
};

/* initialise a new database */
static int tdb_new_database(struct tdb_context *tdb)
{
        /* We make it up in memory, then write it out if not internal */
        struct new_database newdb;

        /* Fill in the header */
        newdb.hdr.version = TDB_VERSION;
        newdb.hdr.hash_seed = random_number(tdb);
        newdb.hdr.hash_test = TDB_HASH_MAGIC;
        newdb.hdr.hash_test = tdb->khash(&newdb.hdr.hash_test,
                                         sizeof(newdb.hdr.hash_test),
                                         newdb.hdr.hash_seed,
                                         tdb->hash_priv);
        newdb.hdr.v.generation = 0;

        /* Free array has 1 zone, 10 buckets.  All buckets empty. */
        newdb.hdr.v.num_zones = 1;
        newdb.hdr.v.zone_bits = INITIAL_ZONE_BITS;
        newdb.hdr.v.free_buckets = INITIAL_FREE_BUCKETS;
        newdb.hdr.v.free_off = offsetof(struct new_database, free);
        set_header(tdb, &newdb.frec, 0,
                   sizeof(newdb.free), sizeof(newdb.free), 0);
        memset(newdb.free, 0, sizeof(newdb.free));

        /* Initial hashes are empty. */
        newdb.hdr.v.hash_bits = INITIAL_HASH_BITS;
        newdb.hdr.v.hash_off = offsetof(struct new_database, hash);
        set_header(tdb, &newdb.hrec, 0,
                   sizeof(newdb.hash), sizeof(newdb.hash), 0);
        memset(newdb.hash, 0, sizeof(newdb.hash));

        if (tdb->flags & TDB_INTERNAL) {
                tdb->map_size = sizeof(newdb);
                tdb->map_ptr = malloc(tdb->map_size);
                if (!tdb->map_ptr) {
                        tdb->ecode = TDB_ERR_OOM;
                        return -1;
                }
                memcpy(tdb->map_ptr, &newdb, tdb->map_size);
                tdb->header = newdb.hdr;
                /* Convert the `ondisk' version if asked. */
                tdb_convert(tdb, tdb->map_ptr, sizeof(newdb));
                return 0;
        }

        if (lseek(tdb->fd, 0, SEEK_SET) == -1)
                return -1;

        if (ftruncate(tdb->fd, 0) == -1)
                return -1;

        /* This creates an endian-converted header, as if read from disk */
        tdb->header = newdb.hdr;
        tdb_convert(tdb, &tdb->header, sizeof(tdb->header));

        /* Don't endian-convert the magic food! */
        memset(newdb.hdr.magic_food, 0, sizeof(newdb.hdr.magic_food));
        strcpy(newdb.hdr.magic_food, TDB_MAGIC_FOOD);

        if (!tdb_pwrite_all(tdb->fd, &newdb, sizeof(newdb), 0)) {
                tdb->ecode = TDB_ERR_IO;
                return -1;
        }
        return 0;
}
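
/* For orientation (illustrative comment, not code from this file): a fresh
 * database is laid out exactly like struct new_database above, so the
 * volatile header's offsets simply point back into that struct:
 *
 *      hdr.v.hash_off == offsetof(struct new_database, hash)
 *      hdr.v.free_off == offsetof(struct new_database, free)
 *
 * and bucket b of the hash table lives at
 *
 *      hash_off + b * sizeof(tdb_off_t)
 */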

struct tdb_context *tdb_open(const char *name, int tdb_flags,
                             int open_flags, mode_t mode,
                             union tdb_attribute *attr)
{
        struct tdb_context *tdb;
        struct stat st;
        int save_errno;
        uint64_t hash_test;
        int v;

        if (!(tdb = (struct tdb_context *)calloc(1, sizeof *tdb))) {
                /* Can't log this: no context yet. */
                errno = ENOMEM;
                return NULL;
        }
        tdb->fd = -1;
        tdb->name = NULL;
        tdb->map_ptr = NULL;
        tdb->flags = tdb_flags;
        tdb->log = null_log_fn;
        tdb->log_priv = NULL;
        tdb->khash = jenkins_hash;
        tdb->hash_priv = NULL;

        if (attr) {
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_open: attributes not yet supported\n");
                errno = EINVAL;
                goto fail;
        }

        if ((open_flags & O_ACCMODE) == O_WRONLY) {
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_open: can't open tdb %s write-only\n", name);
                errno = EINVAL;
                goto fail;
        }

        if ((open_flags & O_ACCMODE) == O_RDONLY) {
                /* read only databases don't do locking */
                tdb->flags |= TDB_NOLOCK;
        }

        /* internal databases don't mmap or lock */
        if (tdb->flags & TDB_INTERNAL) {
                tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
                if (tdb_new_database(tdb) != 0) {
                        tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                                 "tdb_open: tdb_new_database failed!");
                        goto fail;
                }
                TEST_IT(tdb->flags & TDB_CONVERT);
                goto internal;
        }

        if ((tdb->fd = open(name, open_flags, mode)) == -1) {
                tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
                         "tdb_open: could not open file %s: %s\n",
                         name, strerror(errno));
                goto fail;      /* errno set by open(2) */
        }

        /* on exec, don't inherit the fd */
        v = fcntl(tdb->fd, F_GETFD, 0);
        fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC);

        /* ensure there is only one process initialising at once */
        if (tdb_lock_open(tdb) == -1) {
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_open: failed to get open lock on %s: %s\n",
                         name, strerror(errno));
                goto fail;      /* errno set by tdb_brlock */
        }

        if (read(tdb->fd, &tdb->header, sizeof(tdb->header)) != sizeof(tdb->header)
            || strcmp(tdb->header.magic_food, TDB_MAGIC_FOOD) != 0) {
                if (!(open_flags & O_CREAT) || tdb_new_database(tdb) == -1) {
                        if (errno == 0)
                                errno = EIO; /* ie bad format or something */
                        goto fail;
                }
        } else if (tdb->header.version != TDB_VERSION) {
                if (tdb->header.version == bswap_64(TDB_VERSION))
                        tdb->flags |= TDB_CONVERT;
                else {
                        /* wrong version */
                        tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                                 "tdb_open: %s is unknown version 0x%llx\n",
                                 name, (long long)tdb->header.version);
                        errno = EIO;
                        goto fail;
                }
        }

        tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
        hash_test = TDB_HASH_MAGIC;
        hash_test = tdb->khash(&hash_test, sizeof(hash_test),
                               tdb->header.hash_seed, tdb->hash_priv);
        if (tdb->header.hash_test != hash_test) {
                /* wrong hash variant */
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_open: %s uses a different hash function\n",
                         name);
                errno = EINVAL;
                goto fail;
        }

        if (fstat(tdb->fd, &st) == -1)
                goto fail;

        /* Is it already in the open list?  If so, fail. */
        if (tdb_already_open(st.st_dev, st.st_ino)) {
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_open: %s (%d,%d) is already open in this process\n",
                         name, (int)st.st_dev, (int)st.st_ino);
                errno = EBUSY;
                goto fail;
        }

        tdb->name = strdup(name);
        if (!tdb->name) {
                tdb->ecode = TDB_ERR_OOM;
                errno = ENOMEM;
                goto fail;
        }

        tdb->map_size = st.st_size;
        tdb->device = st.st_dev;
        tdb->inode = st.st_ino;

internal:
        /* Internal (memory-only) databases skip all the code above to
         * do with disk files, and resume here by releasing their
         * open lock and hooking into the active list. */
        tdb_unlock_open(tdb);
        tdb->last_zone = random_free_zone(tdb);
        tdb->next = tdbs;
        tdbs = tdb;
        return tdb;

fail:
        save_errno = errno;

        if (tdb->flags & TDB_INTERNAL)
                free(tdb->map_ptr);
        free((char *)tdb->name);
        if (tdb->fd != -1)
                if (close(tdb->fd) != 0)
                        tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                                 "tdb_open: failed to close tdb->fd"
                                 " on error!\n");
        free(tdb);
        errno = save_errno;
        return NULL;
}

static int tdb_key_compare(TDB_DATA key, TDB_DATA data, void *private_data)
{
        return memcmp(data.dptr, key.dptr, data.dsize) == 0;
}

/* Unlock the buckets from start to end inclusive, stepping forwards and
 * wrapping at the table size (a power of two). */
static void unlock_lists(struct tdb_context *tdb,
                         uint64_t start, uint64_t end, int ltype)
{
        uint64_t mask = (1ULL << tdb->header.v.hash_bits) - 1;

        start &= mask;
        end &= mask;
        for (;;) {
                tdb_unlock_list(tdb, start, ltype);
                if (start == end)
                        break;
                start = (start + 1) & mask;
        }
}
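
/* Aside: the "& ((1ULL << hash_bits) - 1)" above is the usual power-of-two
 * modulus.  For example, with hash_bits == 10 there are 1024 buckets, the
 * mask is 0x3ff, and stepping past the last bucket wraps to the first:
 *
 *      (1023 + 1) & 0x3ff == 0
 */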

/* FIXME: Return header copy? */
/* Returns -1 or offset of entry (0 if not found).
 * Locks hash entries from *start to *end (where the entry was found). */
static tdb_off_t find_bucket_and_lock(struct tdb_context *tdb,
                                      const struct tdb_data *key,
                                      uint64_t hash,
                                      uint64_t *start, uint64_t *end,
                                      uint64_t *room,
                                      int ltype)
{
        uint64_t hextra;
        tdb_off_t off;

        /* hash_bits might be out of date... */
again:
        *start = *end = hash & ((1ULL << tdb->header.v.hash_bits) - 1);
        hextra = hash >> tdb->header.v.hash_bits;

        /* FIXME: can we avoid locks for some fast paths? */
        if (tdb_lock_list(tdb, *end, ltype, TDB_LOCK_WAIT) == -1)
                return TDB_OFF_ERR;

        /* We only need to check this for first lock. */
        if (unlikely(update_header(tdb))) {
                tdb_unlock_list(tdb, *end, ltype);
                goto again;
        }

        while ((off = tdb_read_off(tdb, tdb->header.v.hash_off
                                   + *end * sizeof(tdb_off_t)))
               != TDB_OFF_ERR) {
                struct tdb_used_record pad, *r;
                uint64_t keylen, next;

                /* Didn't find it? */
                if (!off)
                        return 0;

#if 0 /* FIXME: Check other bits. */
                unsigned int bits, bitmask, hoffextra;
                /* Bottom three bits show how many extra hash bits. */
                bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1;
                bitmask = (1 << bits)-1;
                hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask);
                if ((hextra & bitmask) != hoffextra)
                        goto lock_next;
#endif

                r = tdb_get(tdb, off, &pad, sizeof(*r));
                if (!r)
                        goto unlock_err;

                if (rec_magic(r) != TDB_MAGIC) {
                        tdb->ecode = TDB_ERR_CORRUPT;
                        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                                 "find_bucket_and_lock: bad magic 0x%llx"
                                 " at offset %llu!\n",
                                 (long long)rec_magic(r), (long long)off);
                        goto unlock_err;
                }

                /* FIXME: check extra bits in header! */
                keylen = rec_key_length(r);
                if (keylen != key->dsize)
                        goto lock_next;

                switch (tdb_parse_data(tdb, *key, off + sizeof(*r), key->dsize,
                                       tdb_key_compare, NULL)) {
                case 1:
                        /* Match! */
                        *room = rec_data_length(r) + rec_extra_padding(r);
                        return off >> TDB_EXTRA_HASHBITS_NUM;
                case 0:
                        break;
                default:
                        goto unlock_err;
                }

        lock_next:
                /* Lock next bucket. */
                /* FIXME: We can deadlock if this wraps! */
                next = (*end + 1) & ((1ULL << tdb->header.v.hash_bits) - 1);
                if (next == *start) {
                        tdb->ecode = TDB_ERR_CORRUPT;
                        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                                 "find_bucket_and_lock: full hash table!\n");
                        goto unlock_err;
                }
                if (tdb_lock_list(tdb, next, ltype, TDB_LOCK_WAIT) == -1)
                        goto unlock_err;
                *end = next;
        }

unlock_err:
        TEST_IT(*end < *start);
        unlock_lists(tdb, *start, *end, ltype);
        return TDB_OFF_ERR;
}
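
/* In other words, the loop above is plain linear probing: a key whose hash
 * maps to bucket b lives in the first free bucket at or after b (mod table
 * size), so a lookup walks forward from b until it finds the key or a zero
 * entry.  Illustrative layout, with two keys K1 and K2 both hashing to
 * bucket 5:
 *
 *      bucket: ...  5    6    7  ...
 *      entry:  ...  K1   K2   0  ...
 *
 * Finding K2 locks and inspects buckets 5 and 6; looking up an absent key
 * that hashes to 5 stops at the zero entry in bucket 7.
 */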

static int update_rec_hdr(struct tdb_context *tdb,
                          tdb_off_t off,
                          tdb_len_t keylen,
                          tdb_len_t datalen,
                          tdb_len_t room,
                          uint64_t h)
{
        struct tdb_used_record rec;

        if (set_header(tdb, &rec, keylen, datalen, room - datalen, h))
                return -1;

        return tdb_write_convert(tdb, off, &rec, sizeof(rec));
}

/* If we fail, others will try after us. */
static void enlarge_hash(struct tdb_context *tdb)
{
        tdb_off_t newoff, i;
        uint64_t h, num = 1ULL << tdb->header.v.hash_bits;
        struct tdb_used_record pad, *r;

        /* FIXME: We should do this without holding locks throughout. */
        if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
                return;

        if (unlikely(update_header(tdb))) {
                /* Someone else enlarged for us?  Nothing to do. */
                if ((1ULL << tdb->header.v.hash_bits) != num)
                        goto unlock;
        }

        newoff = alloc(tdb, 0, num * 2, 0, false);
        if (unlikely(newoff == TDB_OFF_ERR))
                goto unlock;
        if (unlikely(newoff == 0)) {
                if (tdb_expand(tdb, 0, num * 2, false) == -1)
                        goto unlock;
                newoff = alloc(tdb, 0, num * 2, 0, false);
                if (newoff == TDB_OFF_ERR || newoff == 0)
                        goto unlock;
        }

        /* FIXME: If the space before is empty, we know this is in its ideal
         * location.  We can steal a bit from the pointer to avoid rehash. */
        for (i = tdb_find_nonzero_off(tdb, tdb->header.v.hash_off, num);
             i < num;
             /* Step past this bucket, then skip any zero buckets. */
             i++, i += tdb_find_nonzero_off(tdb, tdb->header.v.hash_off
                                            + i*sizeof(tdb_off_t), num - i)) {
                tdb_off_t off;

                off = tdb_read_off(tdb, tdb->header.v.hash_off
                                   + i*sizeof(tdb_off_t));
                if (unlikely(off == TDB_OFF_ERR))
                        goto unlock;
                if (unlikely(!off)) {
                        tdb->ecode = TDB_ERR_CORRUPT;
                        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                                 "enlarge_hash: zero hash bucket!\n");
                        goto unlock;
                }
                h = hash_record(tdb, off);
                /* FIXME: Encode extra hash bits! */
                if (tdb_write_off(tdb, newoff
                                  + (h & ((num * 2) - 1)) * sizeof(uint64_t),
                                  off) == -1)
                        goto unlock;
        }

        /* Free up old hash. */
        r = tdb_get(tdb, tdb->header.v.hash_off, &pad, sizeof(*r));
        if (!r)
                goto unlock;
        add_free_record(tdb, tdb->header.v.hash_off,
                        rec_data_length(r) + rec_extra_padding(r));

        /* Now we write the modified header. */
        tdb->header.v.generation++;
        tdb->header.v.hash_bits++;
        tdb->header.v.hash_off = newoff;
        tdb_write_convert(tdb, offsetof(struct tdb_header, v),
                          &tdb->header.v, sizeof(tdb->header.v));

unlock:
        tdb_allrecord_unlock(tdb, F_WRLCK);
}
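
/* Why doubling keeps the rehash cheap (illustrative): with num == 2^bits
 * old buckets, a record's new bucket is just h & (num * 2 - 1), i.e. its
 * old index plus at most one new top bit.  With hash_bits == 2:
 *
 *      h = 0b1101: old bucket 0b01, new bucket 0b101
 *      h = 0b1001: old bucket 0b01, new bucket 0b001
 */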

int tdb_store(struct tdb_context *tdb,
              struct tdb_data key, struct tdb_data dbuf, int flag)
{
        tdb_off_t new_off, off, start, end, room;
        uint64_t h;
        bool growing = false;

        h = tdb_hash(tdb, key.dptr, key.dsize);
        off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_WRLCK);
        if (off == TDB_OFF_ERR)
                return -1;

        /* Now we have lock on this hash bucket. */
        if (flag == TDB_INSERT) {
                if (off) {
                        tdb->ecode = TDB_ERR_EXISTS;
                        goto fail;
                }
        } else {
                if (off) {
                        if (room >= key.dsize + dbuf.dsize) {
                                new_off = off;
                                if (update_rec_hdr(tdb, off,
                                                   key.dsize, dbuf.dsize,
                                                   room, h))
                                        goto fail;
                                goto write;
                        }
                        /* FIXME: See if right record is free? */
                        /* Hint to allocator that we've realloced. */
                        growing = true;
                } else {
                        if (flag == TDB_MODIFY) {
                                /* if the record doesn't exist and we
                                   are in TDB_MODIFY mode then we should fail
                                   the store */
                                tdb->ecode = TDB_ERR_NOEXIST;
                                goto fail;
                        }
                }
        }

        /* Allocate a new record. */
        new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
        if (unlikely(new_off == 0)) {
                unlock_lists(tdb, start, end, F_WRLCK);
                /* Expand, then try again... */
                if (tdb_expand(tdb, key.dsize, dbuf.dsize, growing) == -1)
                        return -1;
                return tdb_store(tdb, key, dbuf, flag);
        }

        /* We didn't like the existing one: remove it. */
        if (off)
                add_free_record(tdb, off, sizeof(struct tdb_used_record)
                                + key.dsize + room);

write:
        off = tdb->header.v.hash_off + end * sizeof(tdb_off_t);
        /* FIXME: Encode extra hash bits! */
        if (tdb_write_off(tdb, off, new_off) == -1)
                goto fail;

        off = new_off + sizeof(struct tdb_used_record);
        if (tdb->methods->write(tdb, off, key.dptr, key.dsize) == -1)
                goto fail;
        off += key.dsize;
        if (tdb->methods->write(tdb, off, dbuf.dptr, dbuf.dsize) == -1)
                goto fail;

        /* FIXME: tdb_increment_seqnum(tdb); */
        unlock_lists(tdb, start, end, F_WRLCK);

        /* By simple trial and error, this roughly approximates a 60%
         * full measure. */
        if (unlikely(end - start > 4 * tdb->header.v.hash_bits - 32))
                enlarge_hash(tdb);

        return 0;

fail:
        unlock_lists(tdb, start, end, F_WRLCK);
        return -1;
}
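
/* Example usage (a sketch; the key and value are illustrative, and
 * TDB_REPLACE is assumed to be the "store unconditionally" counterpart of
 * the TDB_INSERT/TDB_MODIFY flags tested above):
 *
 *      struct tdb_data key = { .dptr = (void *)"hello", .dsize = 5 };
 *      struct tdb_data val = { .dptr = (void *)"world", .dsize = 5 };
 *
 *      if (tdb_store(tdb, key, val, TDB_REPLACE) != 0)
 *              ... check tdb->ecode ...
 */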

struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
{
        tdb_off_t off, start, end, room;
        uint64_t h;
        struct tdb_used_record pad, *r;
        struct tdb_data ret;

        h = tdb_hash(tdb, key.dptr, key.dsize);
        off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_RDLCK);
        if (off == TDB_OFF_ERR)
                return tdb_null;

        if (!off) {
                unlock_lists(tdb, start, end, F_RDLCK);
                tdb->ecode = TDB_SUCCESS;
                return tdb_null;
        }

        r = tdb_get(tdb, off, &pad, sizeof(*r));
        if (!r) {
                unlock_lists(tdb, start, end, F_RDLCK);
                return tdb_null;
        }

        ret.dsize = rec_data_length(r);
        ret.dptr = tdb_alloc_read(tdb, off + sizeof(*r) + key.dsize,
                                  ret.dsize);
        unlock_lists(tdb, start, end, F_RDLCK);
        return ret;
}
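
/* Example usage (a sketch): the returned dptr comes from tdb_alloc_read()
 * above, so on success the caller owns the buffer and must free() it.
 * do_something() is a hypothetical consumer:
 *
 *      struct tdb_data val = tdb_fetch(tdb, key);
 *
 *      if (val.dptr) {
 *              do_something(val.dptr, val.dsize);
 *              free(val.dptr);
 *      }
 */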

static int hash_add(struct tdb_context *tdb, uint64_t h, tdb_off_t off)
{
        tdb_off_t i, hoff, len, num;

        i = (h & ((1ULL << tdb->header.v.hash_bits) - 1));
        hoff = tdb->header.v.hash_off + i * sizeof(tdb_off_t);
        len = (1ULL << tdb->header.v.hash_bits) - i;

        /* Look for next space. */
        num = tdb_find_zero_off(tdb, hoff, len);
        if (unlikely(num == len)) {
                hoff = tdb->header.v.hash_off;
                len = (1ULL << tdb->header.v.hash_bits);
                num = tdb_find_zero_off(tdb, hoff, len);
                if (num == len)
                        return -1;      /* Hash table full! */
        }

        /* FIXME: Encode extra hash bits! */
        return tdb_write_off(tdb, hoff + num * sizeof(tdb_off_t), off);
}

static int unlink_used_record(struct tdb_context *tdb, tdb_off_t chain,
                              uint64_t *extra_locks)
{
        tdb_off_t num, len, i, hoff;

        /* FIXME: Maybe lock more in search?  Maybe don't lock if scan
         * finds none? */
        len = (1ULL << tdb->header.v.hash_bits) - (chain + 1);
        hoff = tdb->header.v.hash_off + (chain + 1) * sizeof(tdb_off_t);
        num = tdb_find_zero_off(tdb, hoff, len);

        /* We want to lock the zero entry, too.  In the wrap case,
         * this locks one extra.  That's harmless. */
        num++;

        for (i = chain + 1; i < chain + 1 + num; i++) {
                if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_WAIT) == -1) {
                        if (i != chain + 1)
                                unlock_lists(tdb, chain + 1, i-1, F_WRLCK);
                        return -1;
                }
        }

        /* The wrap case: we need those locks out of order! */
        if (unlikely(num == len + 1)) {
                *extra_locks = tdb_find_zero_off(tdb, tdb->header.v.hash_off,
                                                 1ULL << tdb->header.v.hash_bits);
                /* Lock the zero entry at the end of that run, too. */
                (*extra_locks)++;
                for (i = 0; i < *extra_locks; i++) {
                        if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_NOWAIT)) {
                                /* Failed.  Caller must lock in order. */
                                if (i)
                                        unlock_lists(tdb, 0, i-1, F_WRLCK);
                                unlock_lists(tdb, chain + 1, chain + num,
                                             F_WRLCK);
                                return 1;
                        }
                }
                num += *extra_locks - 1;
        }

        /* Now we have the locks, be certain that offset is still 0! */
        hoff = tdb->header.v.hash_off
                + ((chain + num) & ((1ULL << tdb->header.v.hash_bits) - 1))
                * sizeof(tdb_off_t);

        if (unlikely(tdb_read_off(tdb, hoff) != 0)) {
                /* Someone slipped a new entry in: get the caller to retry. */
                unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);
                return 1;
        }

        /* OK, all locked.  Unlink first one. */
        hoff = tdb->header.v.hash_off + chain * sizeof(tdb_off_t);
        if (tdb_write_off(tdb, hoff, 0) == -1)
                goto unlock_err;

        /* Rehash the rest. */
        for (i = 1; i < num; i++) {
                tdb_off_t off;
                uint64_t h;

                hoff = tdb->header.v.hash_off
                        + ((chain + i) & ((1ULL << tdb->header.v.hash_bits) - 1))
                        * sizeof(tdb_off_t);
                off = tdb_read_off(tdb, hoff);
                if (unlikely(off == TDB_OFF_ERR))
                        goto unlock_err;

                /* Maybe use a bit to indicate it is in ideal place? */
                h = hash_record(tdb, off);
                /* Is it happy where it is? */
                if ((h & ((1ULL << tdb->header.v.hash_bits)-1)) == (chain + i))
                        continue;

                /* Remove it. */
                if (tdb_write_off(tdb, hoff, 0) == -1)
                        goto unlock_err;

                /* Rehash it. */
                if (hash_add(tdb, h, off) == -1)
                        goto unlock_err;
        }

        unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);
        return 0;

unlock_err:
        unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);
        return -1;
}
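
/* Why rehash the whole run above?  Classic linear-probing deletion: simply
 * zeroing the unlinked bucket would put a zero in the middle of a probe
 * run, making later lookups stop early.  Sketch, with K1 and K2 both
 * hashing to bucket 5:
 *
 *      before delete(K1):   [5]=K1  [6]=K2  [7]=0
 *      naive zeroing:       [5]=0   [6]=K2  (lookup of K2 stops at 5!)
 *      rehash of the run:   [5]=K2  [6]=0   (lookup still works)
 */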

int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
{
        tdb_off_t off, start, end, room, extra_locks = 0;
        uint64_t h;
        int ret;

        h = tdb_hash(tdb, key.dptr, key.dsize);
        off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_WRLCK);
        if (off == TDB_OFF_ERR)
                return -1;

        if (!off) {
                unlock_lists(tdb, start, end, F_WRLCK);
                tdb->ecode = TDB_ERR_NOEXIST;
                return -1;
        }

        ret = unlink_used_record(tdb, end, &extra_locks);
        if (unlikely(ret == 1)) {
                uint64_t i;

                unlock_lists(tdb, start, end, F_WRLCK);

                /* We need extra locks at the start. */
                for (i = 0; i < extra_locks; i++) {
                        if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_WAIT)) {
                                if (i)
                                        unlock_lists(tdb, 0, i-1, F_WRLCK);
                                return -1;
                        }
                }
                /* Try again now we're holding more locks. */
                ret = tdb_delete(tdb, key);
                if (i)
                        unlock_lists(tdb, 0, i-1, F_WRLCK);
                return ret;
        }
        unlock_lists(tdb, start, end, F_WRLCK);
        return ret;
}

int tdb_close(struct tdb_context *tdb)
{
        struct tdb_context **i;
        int ret = 0;

        if (tdb->transaction) {
                tdb_transaction_cancel(tdb);
        }

        tdb_trace(tdb, "tdb_close");

        if (tdb->flags & TDB_INTERNAL)
                free(tdb->map_ptr);
        else
                tdb_munmap(tdb);
        free((char *)tdb->name);
        if (tdb->fd != -1) {
                ret = close(tdb->fd);
                tdb->fd = -1;
        }

        /* Remove from contexts list */
        for (i = &tdbs; *i; i = &(*i)->next) {
                if (*i == tdb) {
                        *i = tdb->next;
                        break;
                }
        }

        free(tdb);
        return ret;
}
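
/* The pointer-to-pointer loop above is the standard list-unlink idiom: i
 * points at whichever link currently holds the candidate (first &tdbs,
 * then each &(*i)->next), so removal needs no special case for the head.
 * Equivalent sketch on a toy list type (illustrative only):
 *
 *      struct node { struct node *next; };
 *
 *      static void unlink_node(struct node **head, struct node *n)
 *      {
 *              struct node **i;
 *
 *              for (i = head; *i; i = &(*i)->next) {
 *                      if (*i == n) {
 *                              *i = n->next;
 *                              break;
 *                      }
 *              }
 *      }
 */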