2 #include <ccan/tdb2/tdb2.h>
3 #include <ccan/hash/hash.h>
4 #include <ccan/build_assert/build_assert.h>
5 #include <ccan/likely/likely.h>
/* Canonical "no data" value returned on lookup failure: NULL pointer,
 * zero length. */
9 struct tdb_data tdb_null = { .dptr = NULL, .dsize = 0 };
11 /* all contexts, to ensure no double-opens (fcntl locks don't nest!) */
/* Head of the process-wide singly linked list of open tdb contexts;
 * scanned by tdb_already_open() and unlinked from in tdb_close(). */
12 static struct tdb_context *tdbs = NULL;
/* Default logger installed by tdb_open(); presumably discards all
 * messages (body not visible in this view — TODO confirm).
 * PRINTF_ATTRIBUTE(4, 5) tells the compiler to type-check the format
 * string (arg 4) against the variadic arguments (from arg 5). */
14 PRINTF_ATTRIBUTE(4, 5) static void
15 null_log_fn(struct tdb_context *tdb,
16 enum tdb_debug_level level, void *priv,
21 /* We do a lot of work assuming our copy of the header volatile area
22 * is uptodate, and usually it is. However, once we grab a lock, we have to
/* Refresh the cached copy of the header's volatile area (tdb->header.v)
 * if another process has bumped the on-disk generation counter.
 * Must hold at least one lock (or be TDB_NOLOCK) — see the assert.
 * NOTE(review): return statement(s) elided from this view; presumably
 * returns whether the header changed — confirm against full source. */
24 bool header_changed(struct tdb_context *tdb)
28 if (!(tdb->flags & TDB_NOLOCK) && tdb->header_uptodate) {
/* Being called with the header already marked uptodate suggests a
 * redundant call; warn but continue. */
29 tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
30 "warning: header uptodate already\n");
33 /* We could get a partial update if we're not holding any locks. */
34 assert((tdb->flags & TDB_NOLOCK) || tdb_has_locks(tdb));
36 tdb->header_uptodate = true;
/* Compare on-disk generation with our cached one; re-read the whole
 * volatile area only when they differ. */
37 gen = tdb_read_off(tdb, offsetof(struct tdb_header, v.generation));
38 if (unlikely(gen != tdb->header.v.generation)) {
39 tdb_read_convert(tdb, offsetof(struct tdb_header, v),
40 &tdb->header.v, sizeof(tdb->header.v));
/* Publish our (modified) cached volatile header area to disk, bumping
 * the generation counter so other processes notice via header_changed().
 * The assert verifies nobody else changed the header underneath us.
 * Returns the result of tdb_write_convert (0 on success, presumably). */
46 int write_header(struct tdb_context *tdb)
48 assert(tdb_read_off(tdb, offsetof(struct tdb_header, v.generation))
49 == tdb->header.v.generation);
50 tdb->header.v.generation++;
51 return tdb_write_convert(tdb, offsetof(struct tdb_header, v),
52 &tdb->header.v, sizeof(tdb->header.v));
/* Default hash function (tdb->khash): thin wrapper over ccan's stable
 * 64-bit Jenkins hash, mixing in the per-database seed. */
55 static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
58 return hash64_stable((const unsigned char *)key, length, seed);
/* Hash a key using this database's configured hash function and the
 * seed stored in the header (so hashes are stable across opens). */
61 uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
63 return tdb->khash(ptr, len, tdb->header.hash_seed, tdb->hash_priv);
/* Is a database with this device/inode already open in this process?
 * Linear scan of the global tdbs list; needed because POSIX fcntl locks
 * don't nest — a second open would silently break the first's locks. */
66 static bool tdb_already_open(dev_t device, ino_t ino)
68 struct tdb_context *i;
70 for (i = tdbs; i; i = i->next) {
71 if (i->device == device && i->inode == ino) {
/* Produce a 64-bit value for the per-database hash seed.
 * Tries, in order: /dev/urandom, an EGD entropy pool, and finally a
 * weak pid+time fallback. Seed quality matters only for resistance to
 * pathological hash-collision inputs, not cryptography. */
79 static uint64_t random_number(struct tdb_context *tdb)
85 fd = open("/dev/urandom", O_RDONLY);
87 if (tdb_read_all(fd, &ret, sizeof(ret))) {
88 tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
89 "tdb_open: random from /dev/urandom\n");
95 /* FIXME: Untested! Based on Wikipedia protocol description! */
96 fd = open("/dev/egd-pool", O_RDWR);
98 /* Command is 1, next byte is size we want to read. */
99 char cmd[2] = { 1, sizeof(uint64_t) };
100 if (write(fd, cmd, sizeof(cmd)) == sizeof(cmd)) {
101 char reply[1 + sizeof(uint64_t)];
102 int r = read(fd, reply, sizeof(reply));
104 tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
105 "tdb_open: %u random bytes from"
106 " /dev/egd-pool\n", r-1);
107 /* Copy at least some bytes. */
/* EGD reply: byte 0 is the count supplied, rest is entropy.
 * A short read still seeds ret partially before the full-read
 * check below. */
108 memcpy(&ret, reply+1, r - 1);
109 if (reply[0] == sizeof(uint64_t)
110 && r == sizeof(reply)) {
119 /* Fallback: pid and time. */
120 gettimeofday(&now, NULL);
121 ret = getpid() * 100132289ULL + now.tv_sec * 1000000ULL + now.tv_usec;
122 tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
123 "tdb_open: random from getpid and time\n");
/* In-memory image of a freshly initialised database, written out (or
 * malloc'd for TDB_INTERNAL) by tdb_new_database(): the header, then a
 * used-record wrapping the initial hash array, then a used-record
 * wrapping the initial free-bucket array. */
127 struct new_database {
128 struct tdb_header hdr;
129 struct tdb_used_record hrec;
130 tdb_off_t hash[1ULL << INITIAL_HASH_BITS];
131 struct tdb_used_record frec;
132 tdb_off_t free[INITIAL_FREE_BUCKETS + 1]; /* One overflow bucket */
135 /* initialise a new database */
/* Build a complete empty database image in memory, endian-convert it
 * as needed, then install it: via malloc for TDB_INTERNAL databases,
 * otherwise by truncating tdb->fd and writing the image at offset 0.
 * Returns 0 on success, -1 with tdb->ecode set on failure (error paths
 * partly elided from this view). */
136 static int tdb_new_database(struct tdb_context *tdb)
138 /* We make it up in memory, then write it out if not internal */
139 struct new_database newdb;
140 unsigned int magic_off = offsetof(struct tdb_header, magic_food);
142 /* Fill in the header */
143 newdb.hdr.version = TDB_VERSION;
144 newdb.hdr.hash_seed = random_number(tdb);
145 newdb.hdr.hash_test = TDB_HASH_MAGIC;
/* hash_test stores the hash of a known magic value so tdb_open() can
 * detect a mismatched hash function on later opens. */
146 newdb.hdr.hash_test = tdb->khash(&newdb.hdr.hash_test,
147 sizeof(newdb.hdr.hash_test),
151 newdb.hdr.v.generation = 0;
153 /* The initial zone must cover the initial database size! */
154 BUILD_ASSERT((1ULL << INITIAL_ZONE_BITS) >= sizeof(newdb));
156 /* Free array has 1 zone, 10 buckets. All buckets empty. */
157 newdb.hdr.v.num_zones = 1;
158 newdb.hdr.v.zone_bits = INITIAL_ZONE_BITS;
159 newdb.hdr.v.free_buckets = INITIAL_FREE_BUCKETS;
160 newdb.hdr.v.free_off = offsetof(struct new_database, free);
161 set_header(tdb, &newdb.frec, 0,
162 sizeof(newdb.free), sizeof(newdb.free), 0);
163 memset(newdb.free, 0, sizeof(newdb.free));
165 /* Initial hashes are empty. */
166 newdb.hdr.v.hash_bits = INITIAL_HASH_BITS;
167 newdb.hdr.v.hash_off = offsetof(struct new_database, hash);
168 set_header(tdb, &newdb.hrec, 0,
169 sizeof(newdb.hash), sizeof(newdb.hash), 0);
170 memset(newdb.hash, 0, sizeof(newdb.hash));
173 memset(newdb.hdr.magic_food, 0, sizeof(newdb.hdr.magic_food));
174 strcpy(newdb.hdr.magic_food, TDB_MAGIC_FOOD);
176 /* This creates an endian-converted database, as if read from disk */
/* Only the part after magic_food is converted: the magic string must
 * stay byte-identical regardless of endianness. */
178 (char *)&newdb.hdr + magic_off,
179 sizeof(newdb) - magic_off);
181 tdb->header = newdb.hdr;
183 if (tdb->flags & TDB_INTERNAL) {
/* Internal (in-memory) database: no file, just a malloc'd map. */
184 tdb->map_size = sizeof(newdb);
185 tdb->map_ptr = malloc(tdb->map_size);
187 tdb->ecode = TDB_ERR_OOM;
190 memcpy(tdb->map_ptr, &newdb, tdb->map_size);
/* File-backed: rewind, truncate any old contents, write the image. */
193 if (lseek(tdb->fd, 0, SEEK_SET) == -1)
196 if (ftruncate(tdb->fd, 0) == -1)
199 if (!tdb_pwrite_all(tdb->fd, &newdb, sizeof(newdb), 0)) {
200 tdb->ecode = TDB_ERR_IO;
/* Open (and possibly create) a tdb database.
 * name: file path (ignored for TDB_INTERNAL); tdb_flags: TDB_* flags;
 * open_flags/mode: as for open(2); attr: not yet supported (rejected).
 * Returns the new context, or NULL with errno set on failure.
 * NOTE(review): many statements (returns, some error paths, list
 * insertion) are elided from this view. */
206 struct tdb_context *tdb_open(const char *name, int tdb_flags,
207 int open_flags, mode_t mode,
208 union tdb_attribute *attr)
210 struct tdb_context *tdb;
216 tdb = malloc(sizeof(*tdb));
225 /* map_size will be set below. */
226 tdb->ecode = TDB_SUCCESS;
227 /* header will be read in below. */
228 tdb->header_uptodate = false;
229 tdb->flags = tdb_flags;
230 tdb->log = null_log_fn;
231 tdb->log_priv = NULL;
232 tdb->khash = jenkins_hash;
233 tdb->hash_priv = NULL;
234 tdb->transaction = NULL;
235 /* last_zone will be set below. */
/* Attributes are a future extension point; refuse them for now. */
241 tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
242 "tdb_open: attributes not yet supported\n");
/* Write-only makes no sense: we must read the header and records. */
247 if ((open_flags & O_ACCMODE) == O_WRONLY) {
248 tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
249 "tdb_open: can't open tdb %s write-only\n", name);
254 if ((open_flags & O_ACCMODE) == O_RDONLY) {
255 tdb->read_only = true;
256 /* read only databases don't do locking */
257 tdb->flags |= TDB_NOLOCK;
259 tdb->read_only = false;
261 /* internal databases don't need any of the rest. */
262 if (tdb->flags & TDB_INTERNAL) {
263 tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
264 if (tdb_new_database(tdb) != 0) {
265 tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
266 "tdb_open: tdb_new_database failed!");
/* tdb_new_database left the header endian-converted (on-disk form);
 * convert our cached copy back to native order. */
269 TEST_IT(tdb->flags & TDB_CONVERT);
270 tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
274 if ((tdb->fd = open(name, open_flags, mode)) == -1) {
275 tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
276 "tdb_open: could not open file %s: %s\n",
277 name, strerror(errno));
278 goto fail; /* errno set by open(2) */
281 /* on exec, don't inherit the fd */
282 v = fcntl(tdb->fd, F_GETFD, 0);
283 fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC);
285 /* ensure there is only one process initialising at once */
286 if (tdb_lock_open(tdb) == -1) {
287 tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
288 "tdb_open: failed to get open lock on %s: %s\n",
289 name, strerror(errno));
290 goto fail; /* errno set by tdb_brlock */
/* Missing or unrecognised header: create a fresh database if O_CREAT,
 * otherwise treat as corrupt/incompatible. */
293 if (!tdb_pread_all(tdb->fd, &tdb->header, sizeof(tdb->header), 0)
294 || strcmp(tdb->header.magic_food, TDB_MAGIC_FOOD) != 0) {
295 if (!(open_flags & O_CREAT) || tdb_new_database(tdb) == -1) {
297 errno = EIO; /* ie bad format or something */
301 } else if (tdb->header.version != TDB_VERSION) {
/* A byte-swapped version number means the file was written on a
 * machine of the other endianness: enable conversion. */
302 if (tdb->header.version == bswap_64(TDB_VERSION))
303 tdb->flags |= TDB_CONVERT;
306 tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
307 "tdb_open: %s is unknown version 0x%llx\n",
308 name, (long long)tdb->header.version);
314 tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
/* Recompute the hash of the magic test value; if it doesn't match the
 * stored hash_test, the db was created with a different hash fn. */
315 hash_test = TDB_HASH_MAGIC;
316 hash_test = tdb->khash(&hash_test, sizeof(hash_test),
317 tdb->header.hash_seed, tdb->hash_priv);
318 if (tdb->header.hash_test != hash_test) {
319 /* wrong hash variant */
320 tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
321 "tdb_open: %s uses a different hash function\n",
327 if (fstat(tdb->fd, &st) == -1)
330 /* Is it already in the open list? If so, fail. */
331 if (tdb_already_open(st.st_dev, st.st_ino)) {
333 tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
334 "tdb_open: %s (%d,%d) is already open in this process\n",
335 name, (int)st.st_dev, (int)st.st_ino);
340 tdb->name = strdup(name);
346 tdb->map_size = st.st_size;
347 tdb->device = st.st_dev;
348 tdb->inode = st.st_ino;
350 tdb_unlock_open(tdb);
/* --- failure path: unwind whatever was set up above --- */
367 if (tdb->flags & TDB_INTERNAL) {
372 free((char *)tdb->name);
374 if (close(tdb->fd) != 0)
375 tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
376 "tdb_open: failed to close tdb->fd"
/* tdb_parse_data() callback: non-zero iff the on-disk bytes (data)
 * equal the sought key. The caller has already checked the lengths
 * match, so comparing data.dsize bytes is sufficient. */
383 static int tdb_key_compare(TDB_DATA key, TDB_DATA data, void *private_data)
385 return memcmp(data.dptr, key.dptr, data.dsize) == 0;
/* Release the hash-list locks acquired over buckets [start..end],
 * wrapping around the table (masked by hash_bits) as the search did.
 * Loop structure partly elided from this view. */
388 static void unlock_lists(struct tdb_context *tdb,
389 uint64_t start, uint64_t end, int ltype)
392 tdb_unlock_list(tdb, start, ltype);
395 start = (start + 1) & ((1ULL << tdb->header.v.hash_bits) - 1);
399 /* FIXME: Return header copy? */
400 /* Returns -1 or offset of entry (0 if not found).
401 * Locks hash entried from *start to *end (where the entry was found). */
/* Core open-addressing lookup: lock the key's ideal bucket, then walk
 * forward through consecutive non-empty buckets (locking each) until
 * the key is found or an empty bucket ends the chain. On success also
 * reports the record's usable space via *room; returned offset has the
 * extra-hash bits shifted out. Caller must unlock_lists(*start,*end). */
402 static tdb_off_t find_bucket_and_lock(struct tdb_context *tdb,
403 const struct tdb_data *key,
413 /* FIXME: can we avoid locks for some fast paths? */
414 *start = tdb_lock_list(tdb, hash, ltype, TDB_LOCK_WAIT);
415 if (*start == TDB_OFF_ERR)
/* Hash bits beyond the table index; intended for the in-pointer
 * extra-hash filter (currently disabled, see #if 0 below). */
419 hextra = hash >> tdb->header.v.hash_bits;
421 while ((off = tdb_read_off(tdb, tdb->header.v.hash_off
422 + *end * sizeof(tdb_off_t)))
424 struct tdb_used_record pad, *r;
427 /* Didn't find it? */
431 #if 0 /* FIXME: Check other bits. */
432 unsigned int bits, bitmask, hoffextra;
433 /* Bottom three bits show how many extra hash bits. */
434 bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1;
435 bitmask = (1 << bits)-1;
436 hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask);
437 if ((hextra & bitmask) != hoffextra)
441 r = tdb_get(tdb, off, &pad, sizeof(*r));
445 if (rec_magic(r) != TDB_MAGIC) {
446 tdb->ecode = TDB_ERR_CORRUPT;
447 tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
448 "find_bucket_and_lock: bad magic 0x%llx"
449 " at offset %llu!\n",
450 (long long)rec_magic(r), (long long)off);
454 /* FIXME: check extra bits in header! */
/* Cheap length check before the byte-wise key comparison. */
455 keylen = rec_key_length(r);
456 if (keylen != key->dsize)
459 switch (tdb_parse_data(tdb, *key, off + sizeof(*r), key->dsize,
460 tdb_key_compare, NULL)) {
463 *room = rec_data_length(r) + rec_extra_padding(r);
464 return off >> TDB_EXTRA_HASHBITS_NUM;
472 /* Lock next bucket. */
473 /* FIXME: We can deadlock if this wraps! */
474 off = tdb_lock_list(tdb, ++hash, ltype, TDB_LOCK_WAIT);
475 if (off == TDB_OFF_ERR)
/* Not-found exit: drop all the chain locks we took. */
481 TEST_IT(*end < *start);
482 unlock_lists(tdb, *start, *end, ltype);
/* Rewrite an existing used-record header in place for new key/data
 * lengths, keeping the record's total size: leftover space (room -
 * datalen) is recorded as padding. Returns set_header/tdb_write_convert
 * result. */
486 static int update_rec_hdr(struct tdb_context *tdb,
493 struct tdb_used_record rec;
495 if (set_header(tdb, &rec, keylen, datalen, room - datalen, h))
498 return tdb_write_convert(tdb, off, &rec, sizeof(rec));
501 /* If we fail, others will try after us. */
/* Double the hash table: allocate a new array twice the size, rehash
 * every entry into it, free the old array, then publish hash_bits/
 * hash_off with a bumped generation. Runs under the allrecord write
 * lock; failures simply leave the old table in place. */
502 static void enlarge_hash(struct tdb_context *tdb)
505 uint64_t h, num = 1ULL << tdb->header.v.hash_bits;
506 struct tdb_used_record pad, *r;
508 /* FIXME: We should do this without holding locks throughout. */
509 if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
512 /* Someone else enlarged for us? Nothing to do. */
513 if ((1ULL << tdb->header.v.hash_bits) != num)
516 newoff = alloc(tdb, 0, num * 2, 0, false);
517 if (unlikely(newoff == TDB_OFF_ERR))
/* alloc() returning 0 means no free space: expand the file once and
 * retry the allocation. */
519 if (unlikely(newoff == 0)) {
520 if (tdb_expand(tdb, 0, num * 2, false) == -1)
522 newoff = alloc(tdb, 0, num * 2, 0, false);
523 if (newoff == TDB_OFF_ERR || newoff == 0)
527 /* FIXME: If the space before is empty, we know this is in its ideal
528 * location. We can steal a bit from the pointer to avoid rehash. */
/* Walk the old array, skipping runs of empty buckets via
 * tdb_find_nonzero_off, rehashing each live entry into the new one. */
529 for (i = tdb_find_nonzero_off(tdb, tdb->header.v.hash_off, num);
531 i += tdb_find_nonzero_off(tdb, tdb->header.v.hash_off
532 + i*sizeof(tdb_off_t), num - i)) {
534 off = tdb_read_off(tdb, tdb->header.v.hash_off
535 + i*sizeof(tdb_off_t));
536 if (unlikely(off == TDB_OFF_ERR))
538 if (unlikely(!off)) {
539 tdb->ecode = TDB_ERR_CORRUPT;
540 tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
541 "find_bucket_and_lock: zero hash bucket!\n");
544 h = hash_record(tdb, off);
545 /* FIXME: Encode extra hash bits! */
546 if (tdb_write_off(tdb, newoff
547 + (h & ((num * 2) - 1)) * sizeof(uint64_t),
552 /* Free up old hash. */
553 r = tdb_get(tdb, tdb->header.v.hash_off, &pad, sizeof(*r));
556 add_free_record(tdb, tdb->header.v.hash_off,
557 rec_data_length(r) + rec_extra_padding(r));
559 /* Now we write the modified header. */
560 tdb->header.v.generation++;
561 tdb->header.v.hash_bits++;
562 tdb->header.v.hash_off = newoff;
563 tdb_write_convert(tdb, offsetof(struct tdb_header, v),
564 &tdb->header.v, sizeof(tdb->header.v));
566 tdb_allrecord_unlock(tdb, F_WRLCK);
/* Store key/dbuf. flag is TDB_INSERT (fail if key exists), TDB_MODIFY
 * (fail if key absent) or replace. Reuses the existing record in place
 * when it has room; otherwise allocates a new record (expanding and
 * retrying via tail-recursion if needed), frees the old one, and links
 * the new offset into the hash chain. May trigger enlarge_hash() when
 * the probed chain grows long. Several return/error lines elided from
 * this view. */
569 int tdb_store(struct tdb_context *tdb,
570 struct tdb_data key, struct tdb_data dbuf, int flag)
572 tdb_off_t new_off, off, start, end, room;
574 bool growing = false;
576 h = tdb_hash(tdb, key.dptr, key.dsize);
577 off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_WRLCK);
578 if (off == TDB_OFF_ERR)
581 /* Now we have lock on this hash bucket. */
582 if (flag == TDB_INSERT) {
584 tdb->ecode = TDB_ERR_EXISTS;
/* Existing record big enough: overwrite in place, no realloc. */
589 if (room >= key.dsize + dbuf.dsize) {
591 if (update_rec_hdr(tdb, off,
592 key.dsize, dbuf.dsize,
597 /* FIXME: See if right record is free? */
598 /* Hint to allocator that we've realloced. */
601 if (flag == TDB_MODIFY) {
602 /* if the record doesn't exist and we
603 are in TDB_MODIFY mode then we should fail
605 tdb->ecode = TDB_ERR_NOEXIST;
611 /* Allocate a new record. */
612 new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
/* No space: drop locks, grow the file, and retry the whole store. */
614 unlock_lists(tdb, start, end, F_WRLCK);
615 /* Expand, then try again... */
616 if (tdb_expand(tdb, key.dsize, dbuf.dsize, growing) == -1)
618 return tdb_store(tdb, key, dbuf, flag);
621 /* We didn't like the existing one: remove it. */
623 add_free_record(tdb, off, sizeof(struct tdb_used_record)
/* Point the bucket where the search ended at the new record. */
628 off = tdb->header.v.hash_off + end * sizeof(tdb_off_t);
629 /* FIXME: Encode extra hash bits! */
630 if (tdb_write_off(tdb, off, new_off) == -1)
/* Write key then data contiguously after the record header. */
633 off = new_off + sizeof(struct tdb_used_record);
634 if (tdb->methods->write(tdb, off, key.dptr, key.dsize) == -1)
637 if (tdb->methods->write(tdb, off, dbuf.dptr, dbuf.dsize) == -1)
640 /* FIXME: tdb_increment_seqnum(tdb); */
641 unlock_lists(tdb, start, end, F_WRLCK);
643 /* By simple trial and error, this roughly approximates a 60%
645 if (unlikely(end - start > 4 * tdb->header.v.hash_bits - 32))
/* --- common failure exit: release the chain locks --- */
651 unlock_lists(tdb, start, end, F_WRLCK);
/* Look up key and return a freshly allocated copy of its data (caller
 * frees ret.dptr). Returns tdb_null-style empty data when the key is
 * absent or on error (return statements elided from this view). Takes
 * only read locks on the probed hash chain. */
655 struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
657 tdb_off_t off, start, end, room;
659 struct tdb_used_record pad, *r;
662 h = tdb_hash(tdb, key.dptr, key.dsize);
663 off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_RDLCK);
664 if (off == TDB_OFF_ERR)
/* off == 0: not found — still a "successful" lookup. */
668 unlock_lists(tdb, start, end, F_RDLCK);
669 tdb->ecode = TDB_SUCCESS;
673 r = tdb_get(tdb, off, &pad, sizeof(*r));
675 unlock_lists(tdb, start, end, F_RDLCK);
/* Data lives immediately after the header and the key bytes. */
679 ret.dsize = rec_data_length(r);
680 ret.dptr = tdb_alloc_read(tdb, off + sizeof(*r) + key.dsize,
682 unlock_lists(tdb, start, end, F_RDLCK);
/* Insert record offset `off` for hash `h` into the hash array: start at
 * the ideal bucket and take the first zero (empty) slot, wrapping to
 * the table start if the tail is full. Caller holds the needed locks.
 * Full-table error handling elided from this view. */
686 static int hash_add(struct tdb_context *tdb, uint64_t h, tdb_off_t off)
688 tdb_off_t i, hoff, len, num;
690 i = (h & ((1ULL << tdb->header.v.hash_bits) - 1));
691 hoff = tdb->header.v.hash_off + i * sizeof(tdb_off_t);
692 len = (1ULL << tdb->header.v.hash_bits) - i;
694 /* Look for next space. */
695 num = tdb_find_zero_off(tdb, hoff, len);
696 if (unlikely(num == len)) {
/* Nothing free between ideal slot and end: wrap and search from 0. */
697 hoff = tdb->header.v.hash_off;
698 len = (1ULL << tdb->header.v.hash_bits);
699 num = tdb_find_zero_off(tdb, hoff, len);
703 /* FIXME: Encode extra hash bits! */
704 return tdb_write_off(tdb, hoff + num * sizeof(tdb_off_t), off);
/* Remove the entry at bucket `chain` from the open-addressed hash,
 * then rehash the displaced run that follows it (classic linear-probe
 * deletion). Locks the run up to and including its terminating zero
 * bucket. In the wrap-around case it must take low-numbered locks
 * out of order: it tries TDB_LOCK_NOWAIT, and on failure reports via
 * *extra_locks how many locks the caller (tdb_delete) must take in
 * order before retrying — presumably returning 1 for that case (return
 * lines elided from this view; confirm against full source). */
707 static int unlink_used_record(struct tdb_context *tdb, tdb_off_t chain,
708 uint64_t *extra_locks)
710 tdb_off_t num, len, i, hoff;
712 /* FIXME: Maybe lock more in search? Maybe don't lock if scan
/* Find the run of non-empty buckets after `chain`. */
715 len = (1ULL << tdb->header.v.hash_bits) - (chain + 1);
716 hoff = tdb->header.v.hash_off + (chain + 1) * sizeof(tdb_off_t);
717 num = tdb_find_zero_off(tdb, hoff, len);
719 /* We want to lock the zero entry, too. In the wrap case,
720 * this locks one extra. That's harmless. */
723 for (i = chain + 1; i < chain + 1 + num; i++) {
724 if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_WAIT)
727 unlock_lists(tdb, chain + 1, i-1, F_WRLCK);
732 /* The wrap case: we need those locks out of order! */
733 if (unlikely(num == len + 1)) {
734 *extra_locks = tdb_find_zero_off(tdb, tdb->header.v.hash_off,
735 1ULL << tdb->header.v.hash_bits);
737 for (i = 0; i < *extra_locks; i++) {
738 if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_NOWAIT)
740 /* Failed. Caller must lock in order. */
742 unlock_lists(tdb, 0, i-1, F_WRLCK);
743 unlock_lists(tdb, chain + 1, chain + num,
751 /* Now we have the locks, be certain that offset is still 0! */
752 hoff = tdb->header.v.hash_off
753 + (((chain + num) * sizeof(tdb_off_t))
754 & ((1ULL << tdb->header.v.hash_bits) - 1));
756 if (unlikely(tdb_read_off(tdb, hoff) != 0)) {
757 unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);
761 /* OK, all locked. Unlink first one. */
762 hoff = tdb->header.v.hash_off + chain * sizeof(tdb_off_t);
763 if (tdb_write_off(tdb, hoff, 0) == -1)
766 /* Rehash the rest. */
767 for (i = 1; i < num; i++) {
771 hoff = tdb->header.v.hash_off
772 + (((chain + i) * sizeof(tdb_off_t))
773 & ((1ULL << tdb->header.v.hash_bits) - 1));
774 off = tdb_read_off(tdb, hoff);
775 if (unlikely(off == TDB_OFF_ERR))
778 /* Maybe use a bit to indicate it is in ideal place? */
779 h = hash_record(tdb, off);
780 /* Is it happy where it is? */
781 if ((h & ((1ULL << tdb->header.v.hash_bits)-1)) == (chain + i))
/* Displaced entry: clear its bucket and reinsert from its
 * ideal position via hash_add(). */
785 if (tdb_write_off(tdb, hoff, 0) == -1)
789 if (hash_add(tdb, h, off) == -1)
792 unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);
796 unlock_lists(tdb, chain + 1, chain + num, F_WRLCK);
/* Delete key from the database. Finds and write-locks the chain, then
 * unlinks the record via unlink_used_record(). If that reports it needs
 * out-of-order wrap-around locks (ret == 1), we drop everything, take
 * the low-numbered locks in order, and retry the whole delete.
 * Returns/error paths partly elided from this view. */
800 int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
802 tdb_off_t off, start, end, room, extra_locks = 0;
806 h = tdb_hash(tdb, key.dptr, key.dsize);
807 off = find_bucket_and_lock(tdb, &key, h, &start, &end, &room, F_WRLCK);
808 if (off == TDB_OFF_ERR)
/* off == 0: key not present. */
812 unlock_lists(tdb, start, end, F_WRLCK);
813 tdb->ecode = TDB_ERR_NOEXIST;
817 ret = unlink_used_record(tdb, end, &extra_locks);
818 if (unlikely(ret == 1)) {
821 unlock_lists(tdb, start, end, F_WRLCK);
823 /* We need extra locks at the start. */
824 for (i = 0; i < extra_locks; i++) {
825 if (tdb_lock_list(tdb, i, F_WRLCK, TDB_LOCK_WAIT)
828 unlock_lists(tdb, 0, i-1, F_WRLCK);
832 /* Try again now we're holding more locks. */
833 ret = tdb_delete(tdb, key);
834 unlock_lists(tdb, 0, i, F_WRLCK);
837 unlock_lists(tdb, start, end, F_WRLCK);
/* Close a database: cancel any live transaction, free internal/mapped
 * resources, free the name, close the fd, and remove the context from
 * the global tdbs list. Returns close()'s result (cleanup lines partly
 * elided from this view). */
841 int tdb_close(struct tdb_context *tdb)
843 struct tdb_context **i;
847 if (tdb->transaction) {
848 tdb_transaction_cancel(tdb);
851 tdb_trace(tdb, "tdb_close");
854 if (tdb->flags & TDB_INTERNAL)
859 free((char *)tdb->name);
861 ret = close(tdb->fd);
866 /* Remove from contexts list */
/* Walk via pointer-to-pointer so the matching node can be unlinked
 * without a special case for the list head. */
867 for (i = &tdbs; *i; i = &(*i)->next) {
882 enum TDB_ERROR tdb_error(struct tdb_context *tdb)