#include "private.h"
#include <ccan/tdb2/tdb2.h>
#include <ccan/hash/hash.h>
#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>
#include <assert.h>

/* The null return. */
struct tdb_data tdb_null = { .dptr = NULL, .dsize = 0 };
/* all contexts, to ensure no double-opens (fcntl locks don't nest!) */
static struct tdb_context *tdbs = NULL;

/* The default logger does nothing: it exists so callers never have to
 * check tdb->log for NULL. */
PRINTF_ATTRIBUTE(4, 5) static void
null_log_fn(struct tdb_context *tdb,
	    enum tdb_debug_level level, void *priv,
	    const char *fmt, ...)
{
}
/* We do a lot of work assuming our copy of the header volatile area
 * is uptodate, and usually it is.  However, once we grab a lock, we have to
 * re-check it. */
bool header_changed(struct tdb_context *tdb)
{
	uint64_t gen;

	if (!(tdb->flags & TDB_NOLOCK) && tdb->header_uptodate) {
		tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
			 "warning: header uptodate already\n");
	}

	/* We could get a partial update if we're not holding any locks. */
	assert((tdb->flags & TDB_NOLOCK) || tdb_has_locks(tdb));

	tdb->header_uptodate = true;
	gen = tdb_read_off(tdb, offsetof(struct tdb_header, v.generation));
	if (unlikely(gen != tdb->header.v.generation)) {
		tdb_read_convert(tdb, offsetof(struct tdb_header, v),
				 &tdb->header.v, sizeof(tdb->header.v));
		return true;
	}
	return false;
}
int write_header(struct tdb_context *tdb)
{
	assert(tdb_read_off(tdb, offsetof(struct tdb_header, v.generation))
	       == tdb->header.v.generation);
	tdb->header.v.generation++;
	return tdb_write_convert(tdb, offsetof(struct tdb_header, v),
				 &tdb->header.v, sizeof(tdb->header.v));
}
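/*
 * Added commentary (not in the original source): the volatile header
 * fields are guarded by v.generation.  The intended pattern for a writer
 * is: take a lock, call header_changed() to refresh the cached copy if
 * another process bumped the generation, mutate tdb->header.v, then call
 * write_header() to bump the generation and write it back.  enlarge_hash()
 * below ends with exactly such a write_header() call.
 */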
static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
			     void *arg)
{
	return hash64_stable((const unsigned char *)key, length, seed);
}

uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
{
	return tdb->khash(ptr, len, tdb->header.hash_seed, tdb->hash_priv);
}
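#if 0
/* Illustrative only (not in the original source): the hash can be replaced
 * at open time with a TDB_ATTRIBUTE_HASH attribute; the replacement must
 * match the tdb->khash signature used above. */
static uint64_t example_hash(const void *key, size_t len, uint64_t seed,
			     void *priv)
{
	/* Any stable 64-bit hash will do; seed comes from the header. */
	return hash64_stable((const unsigned char *)key, len, seed);
}
#endif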
static bool tdb_already_open(dev_t device, ino_t ino)
{
	struct tdb_context *i;

	for (i = tdbs; i; i = i->next) {
		if (i->device == device && i->inode == ino) {
			return true;
		}
	}

	return false;
}
static uint64_t random_number(struct tdb_context *tdb)
{
	int fd;
	uint64_t ret = 0;
	struct timeval now;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		if (tdb_read_all(fd, &ret, sizeof(ret))) {
			tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
				 "tdb_open: random from /dev/urandom\n");
			close(fd);
			return ret;
		}
		close(fd);
	}
	/* FIXME: Untested!  Based on Wikipedia protocol description! */
	fd = open("/dev/egd-pool", O_RDWR);
	if (fd >= 0) {
		/* Command is 1, next byte is size we want to read. */
		char cmd[2] = { 1, sizeof(uint64_t) };
		if (write(fd, cmd, sizeof(cmd)) == sizeof(cmd)) {
			char reply[1 + sizeof(uint64_t)];
			int r = read(fd, reply, sizeof(reply));
			if (r >= 1) {
				tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
					 "tdb_open: %u random bytes from"
					 " /dev/egd-pool\n", r-1);
				/* Copy at least some bytes. */
				memcpy(&ret, reply+1, r - 1);
				if (reply[0] == sizeof(uint64_t)
				    && r == sizeof(reply)) {
					close(fd);
					return ret;
				}
			}
		}
		close(fd);
	}

	/* Fallback: pid and time. */
	gettimeofday(&now, NULL);
	ret = getpid() * 100132289ULL + now.tv_sec * 1000000ULL + now.tv_usec;
	tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
		 "tdb_open: random from getpid and time\n");
	return ret;
}
struct new_database {
	struct tdb_header hdr;
	struct tdb_used_record hrec;
	tdb_off_t hash[1ULL << INITIAL_HASH_BITS];
	struct tdb_used_record frec;
	tdb_off_t free[INITIAL_FREE_BUCKETS + 1]; /* One overflow bucket */
};
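/*
 * Layout note (added commentary, not in the original source): a freshly
 * created database file is exactly this struct, so the offsets of `hash'
 * and `free' within it become hdr.v.hash_off and hdr.v.free_off below.
 * Each array is preceded by a tdb_used_record header; that is what lets
 * enlarge_hash() hand the old hash array back to the free list later.
 */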
/* initialise a new database */
static int tdb_new_database(struct tdb_context *tdb)
{
	/* We make it up in memory, then write it out if not internal */
	struct new_database newdb;
	unsigned int magic_off = offsetof(struct tdb_header, magic_food);

	/* Fill in the header */
	newdb.hdr.version = TDB_VERSION;
	newdb.hdr.hash_seed = random_number(tdb);
	newdb.hdr.hash_test = TDB_HASH_MAGIC;
	newdb.hdr.hash_test = tdb->khash(&newdb.hdr.hash_test,
					 sizeof(newdb.hdr.hash_test),
					 newdb.hdr.hash_seed,
					 tdb->hash_priv);

	newdb.hdr.v.generation = 0;

	/* The initial zone must cover the initial database size! */
	BUILD_ASSERT((1ULL << INITIAL_ZONE_BITS) >= sizeof(newdb));

	/* Free array has 1 zone, 10 buckets.  All buckets empty. */
	newdb.hdr.v.num_zones = 1;
	newdb.hdr.v.zone_bits = INITIAL_ZONE_BITS;
	newdb.hdr.v.free_buckets = INITIAL_FREE_BUCKETS;
	newdb.hdr.v.free_off = offsetof(struct new_database, free);
	set_header(tdb, &newdb.frec, 0,
		   sizeof(newdb.free), sizeof(newdb.free), 0);
	memset(newdb.free, 0, sizeof(newdb.free));

	/* Initial hashes are empty. */
	newdb.hdr.v.hash_bits = INITIAL_HASH_BITS;
	newdb.hdr.v.hash_off = offsetof(struct new_database, hash);
	set_header(tdb, &newdb.hrec, 0,
		   sizeof(newdb.hash), sizeof(newdb.hash), 0);
	memset(newdb.hash, 0, sizeof(newdb.hash));

	/* Magic food */
	memset(newdb.hdr.magic_food, 0, sizeof(newdb.hdr.magic_food));
	strcpy(newdb.hdr.magic_food, TDB_MAGIC_FOOD);

	/* This creates an endian-converted database, as if read from disk */
	tdb_convert(tdb,
		    (char *)&newdb.hdr + magic_off,
		    sizeof(newdb) - magic_off);

	tdb->header = newdb.hdr;

	if (tdb->flags & TDB_INTERNAL) {
		tdb->map_size = sizeof(newdb);
		tdb->map_ptr = malloc(tdb->map_size);
		if (!tdb->map_ptr) {
			tdb->ecode = TDB_ERR_OOM;
			return -1;
		}
		memcpy(tdb->map_ptr, &newdb, tdb->map_size);
		return 0;
	}
	if (lseek(tdb->fd, 0, SEEK_SET) == -1)
		return -1;

	if (ftruncate(tdb->fd, 0) == -1)
		return -1;

	if (!tdb_pwrite_all(tdb->fd, &newdb, sizeof(newdb), 0)) {
		tdb->ecode = TDB_ERR_IO;
		return -1;
	}
	return 0;
}
struct tdb_context *tdb_open(const char *name, int tdb_flags,
			     int open_flags, mode_t mode,
			     union tdb_attribute *attr)
{
	struct tdb_context *tdb;
	struct stat st;
	int save_errno;
	uint64_t hash_test;
	unsigned v;

	tdb = malloc(sizeof(*tdb));
	if (!tdb) {
		/* Can't log this */
		errno = ENOMEM;
		return NULL;
	}
	tdb->name = NULL;
	tdb->map_ptr = NULL;
	tdb->fd = -1;
	/* map_size will be set below. */
	tdb->ecode = TDB_SUCCESS;
	/* header will be read in below. */
	tdb->header_uptodate = false;
	tdb->flags = tdb_flags;
	tdb->log = null_log_fn;
	tdb->log_priv = NULL;
	tdb->khash = jenkins_hash;
	tdb->hash_priv = NULL;
	tdb->transaction = NULL;
	/* last_zone will be set below. */
	tdb_io_init(tdb);

	while (attr) {
		switch (attr->base.attr) {
		case TDB_ATTRIBUTE_LOG:
			tdb->log = attr->log.log_fn;
			tdb->log_priv = attr->log.log_private;
			break;
		case TDB_ATTRIBUTE_HASH:
			tdb->khash = attr->hash.hash_fn;
			tdb->hash_priv = attr->hash.hash_private;
			break;
		default:
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: unknown attribute type %u\n",
				 attr->base.attr);
			errno = EINVAL;
			goto fail;
		}
		attr = attr->base.next;
	}
	if ((open_flags & O_ACCMODE) == O_WRONLY) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: can't open tdb %s write-only\n", name);
		errno = EINVAL;
		goto fail;
	}

	if ((open_flags & O_ACCMODE) == O_RDONLY) {
		tdb->read_only = true;
		/* read only databases don't do locking */
		tdb->flags |= TDB_NOLOCK;
	} else
		tdb->read_only = false;

	/* internal databases don't need any of the rest. */
	if (tdb->flags & TDB_INTERNAL) {
		tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
		if (tdb_new_database(tdb) != 0) {
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: tdb_new_database failed!");
			goto fail;
		}
		TEST_IT(tdb->flags & TDB_CONVERT);
		tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
		/* Zones don't matter for internal db. */
		tdb->last_zone = 0;
		return tdb;
	}
	if ((tdb->fd = open(name, open_flags, mode)) == -1) {
		tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
			 "tdb_open: could not open file %s: %s\n",
			 name, strerror(errno));
		goto fail;	/* errno set by open(2) */
	}

	/* on exec, don't inherit the fd */
	v = fcntl(tdb->fd, F_GETFD, 0);
	fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC);

	/* ensure there is only one process initialising at once */
	if (tdb_lock_open(tdb) == -1) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: failed to get open lock on %s: %s\n",
			 name, strerror(errno));
		goto fail;	/* errno set by tdb_brlock */
	}

	if (!tdb_pread_all(tdb->fd, &tdb->header, sizeof(tdb->header), 0)
	    || strcmp(tdb->header.magic_food, TDB_MAGIC_FOOD) != 0) {
		if (!(open_flags & O_CREAT) || tdb_new_database(tdb) == -1) {
			if (errno == 0) {
				errno = EIO; /* ie bad format or something */
			}
			goto fail;
		}
	} else if (tdb->header.version != TDB_VERSION) {
		if (tdb->header.version == bswap_64(TDB_VERSION))
			tdb->flags |= TDB_CONVERT;
		else {
			/* wrong version */
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: %s is unknown version 0x%llx\n",
				 name, (long long)tdb->header.version);
			errno = EIO;
			goto fail;
		}
	}

	tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
	hash_test = TDB_HASH_MAGIC;
	hash_test = tdb->khash(&hash_test, sizeof(hash_test),
			       tdb->header.hash_seed, tdb->hash_priv);
	if (tdb->header.hash_test != hash_test) {
		/* wrong hash variant */
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: %s uses a different hash function\n",
			 name);
		errno = EIO;
		goto fail;
	}

	if (fstat(tdb->fd, &st) == -1)
		goto fail;

	/* Is it already in the open list?  If so, fail. */
	if (tdb_already_open(st.st_dev, st.st_ino)) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: %s (%d,%d) is already open in this process\n",
			 name, (int)st.st_dev, (int)st.st_ino);
		errno = EBUSY;
		goto fail;
	}

	tdb->name = strdup(name);
	if (!tdb->name) {
		errno = ENOMEM;
		goto fail;
	}
	tdb->map_size = st.st_size;
	tdb->device = st.st_dev;
	tdb->inode = st.st_ino;
	tdb_mmap(tdb);
	tdb_unlock_open(tdb);

	/* Spread allocations: start from an arbitrary zone (the exact
	 * choice doesn't matter for correctness). */
	tdb->last_zone = random_number(tdb) % tdb->header.v.num_zones;

	/* Add to the list of open databases. */
	tdb->next = tdbs;
	tdbs = tdb;
	return tdb;

 fail:
	save_errno = errno;

	if (tdb->map_ptr) {
		if (tdb->flags & TDB_INTERNAL)
			free(tdb->map_ptr);
		else
			tdb_munmap(tdb);
	}
	free((char *)tdb->name);
	if (tdb->fd != -1)
		if (close(tdb->fd) != 0)
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: failed to close tdb->fd"
				 " on error!\n");
	free(tdb);
	errno = save_errno;
	return NULL;
}
static tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
{
	return tdb->header.v.hash_off
		+ ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
		   * sizeof(tdb_off_t));
}
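/*
 * Worked example (added commentary, not in the original source): with
 * hash_bits = 10 the table has 1024 buckets and the mask is 0x3ff, so
 * hash_off(tdb, 1030) == hash_off(tdb, 6).  Bucket numbers wrap around
 * the table, which is what lets the linear probes below run off the end.
 */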
/* Returns 0 if the entry is a zero (definitely not a match).
 * Returns a valid entry offset if it's a match.  Fills in rec.
 * Otherwise returns TDB_OFF_ERR: keep searching. */
static tdb_off_t entry_matches(struct tdb_context *tdb,
			       uint64_t list,
			       uint64_t hash,
			       const struct tdb_data *key,
			       struct tdb_used_record *rec)
{
	tdb_off_t off;
	uint64_t keylen;
	const unsigned char *rkey;

	list &= ((1ULL << tdb->header.v.hash_bits) - 1);

	off = tdb_read_off(tdb, tdb->header.v.hash_off
			   + list * sizeof(tdb_off_t));
	if (off == 0 || off == TDB_OFF_ERR)
		return off;

#if 0 /* FIXME: Check other bits. */
	unsigned int bits, bitmask, hoffextra;
	/* Bottom three bits show how many extra hash bits. */
	bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1;
	bitmask = (1 << bits)-1;
	hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask);
	uint64_t hextra = hash >> tdb->header.v.hash_bits;
	if ((hextra & bitmask) != hoffextra)
		return TDB_OFF_ERR;
#endif

	if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
		return TDB_OFF_ERR;

	/* FIXME: check extra bits in header! */
	keylen = rec_key_length(rec);
	if (keylen != key->dsize)
		return TDB_OFF_ERR;

	rkey = tdb_access_read(tdb, off + sizeof(*rec), keylen, false);
	if (!rkey)
		return TDB_OFF_ERR;
	if (memcmp(rkey, key->dptr, keylen) != 0)
		off = TDB_OFF_ERR;
	tdb_access_release(tdb, rkey);
	return off;
}
/* FIXME: Optimize? */
static void unlock_lists(struct tdb_context *tdb,
			 tdb_off_t list, tdb_len_t num,
			 int ltype)
{
	tdb_off_t i;

	for (i = list; i < list + num; i++)
		tdb_unlock_list(tdb, i, ltype);
}

/* FIXME: Optimize? */
static int lock_lists(struct tdb_context *tdb,
		      tdb_off_t list, tdb_len_t num,
		      int ltype)
{
	tdb_off_t i;

	for (i = list; i < list + num; i++) {
		if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT)
		    == TDB_OFF_ERR) {
			unlock_lists(tdb, list, i - list, ltype);
			return -1;
		}
	}
	return 0;
}
/* We lock hashes up to the next empty offset.  We already hold the
 * lock on the start bucket, but we may need to release and re-grab
 * it.  If we fail, we hold no locks at all! */
static tdb_len_t relock_hash_to_zero(struct tdb_context *tdb,
				     tdb_off_t start, int ltype)
{
	tdb_len_t num, len;

again:
	num = 1ULL << tdb->header.v.hash_bits;
	len = tdb_find_zero_off(tdb, hash_off(tdb, start), num - start);
	if (unlikely(len == num - start)) {
		/* We hit the end of the hash range.  Drop lock: we have
		   to lock start of hash first. */
		tdb_len_t pre_locks;

		tdb_unlock_list(tdb, start, ltype);

		/* Grab something, so header is stable. */
		if (tdb_lock_list(tdb, 0, ltype, TDB_LOCK_WAIT))
			return TDB_OFF_ERR;
		pre_locks = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
		/* We want to lock the zero entry as well. */
		pre_locks++;
		if (lock_lists(tdb, 1, pre_locks - 1, ltype) == -1) {
			tdb_unlock_list(tdb, 0, ltype);
			return TDB_OFF_ERR;
		}

		/* Now lock later ones. */
		if (unlikely(lock_lists(tdb, start, len, ltype) == -1)) {
			unlock_lists(tdb, 0, pre_locks, ltype);
			return TDB_OFF_ERR;
		}
		len += pre_locks;
	} else {
		/* We want to lock the zero entry as well. */
		len++;
		/* But we already have lock on start. */
		if (unlikely(lock_lists(tdb, start+1, len-1, ltype) == -1)) {
			tdb_unlock_list(tdb, start, ltype);
			return TDB_OFF_ERR;
		}
	}

	/* Now, did we lose the race, and it's not zero any more? */
	if (unlikely(tdb_read_off(tdb, hash_off(tdb, start + len - 1)) != 0)) {
		/* Leave the start locked, as expected. */
		unlock_lists(tdb, start + 1, len - 1, ltype);
		goto again;
	}

	return len;
}
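/*
 * Worked example of the wrap case above (added commentary, not in the
 * original source): with 8 buckets, suppose start = 6 and buckets 6, 7, 0
 * and 1 are occupied, bucket 2 empty.  The first scan from 6 finds no zero
 * before the end of the table, so we drop the lock on 6, lock 0..2 (the
 * "pre_locks"), then re-lock 6..7.  The caller ends up holding 6,7,0,1,2:
 * the whole probe run plus its terminating zero bucket, and len == 5.
 */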
/* FIXME: modify, don't rewrite! */
static int update_rec_hdr(struct tdb_context *tdb,
			  tdb_off_t off,
			  tdb_len_t keylen,
			  tdb_len_t datalen,
			  struct tdb_used_record *rec,
			  uint64_t h)
{
	uint64_t dataroom = rec_data_length(rec) + rec_extra_padding(rec);

	if (set_header(tdb, rec, keylen, datalen, keylen + dataroom, h))
		return -1;

	return tdb_write_convert(tdb, off, rec, sizeof(*rec));
}
/* If we fail, others will try after us. */
static void enlarge_hash(struct tdb_context *tdb)
{
	tdb_off_t newoff, oldoff, i;
	tdb_len_t hlen;
	uint64_t h, num = 1ULL << tdb->header.v.hash_bits;
	struct tdb_used_record pad, *r;

	/* FIXME: We should do this without holding locks throughout. */
	if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
		return;

	/* Someone else enlarged for us?  Nothing to do. */
	if ((1ULL << tdb->header.v.hash_bits) != num)
		goto unlock;

	/* Allocate our new array. */
	hlen = num * sizeof(tdb_off_t) * 2;
	newoff = alloc(tdb, 0, hlen, 0, false);
	if (unlikely(newoff == TDB_OFF_ERR))
		goto unlock;
	if (unlikely(newoff == 0)) {
		if (tdb_expand(tdb, 0, hlen, false) == -1)
			goto unlock;
		newoff = alloc(tdb, 0, hlen, 0, false);
		if (newoff == TDB_OFF_ERR || newoff == 0)
			goto unlock;
	}
	/* Step over record header! */
	newoff += sizeof(struct tdb_used_record);

	/* Starts all zero. */
	if (zero_out(tdb, newoff, hlen) == -1)
		goto unlock;

	/* FIXME: If the space before is empty, we know this is in its ideal
	 * location.  Or steal a bit from the pointer to avoid rehash. */
	for (i = tdb_find_nonzero_off(tdb, hash_off(tdb, 0), num);
	     i < num;
	     i += tdb_find_nonzero_off(tdb, hash_off(tdb, i), num - i)) {
		tdb_off_t off;

		off = tdb_read_off(tdb, hash_off(tdb, i));
		if (unlikely(off == TDB_OFF_ERR))
			goto unlock;
		if (unlikely(!off)) {
			tdb->ecode = TDB_ERR_CORRUPT;
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "enlarge_hash: zero hash bucket!\n");
			goto unlock;
		}

		/* Find next empty hash slot. */
		for (h = hash_record(tdb, off);
		     tdb_read_off(tdb, newoff + (h & ((num * 2)-1))
				  * sizeof(tdb_off_t)) != 0;
		     h++);

		/* FIXME: Encode extra hash bits! */
		if (tdb_write_off(tdb, newoff + (h & ((num * 2)-1))
				  * sizeof(tdb_off_t), off) == -1)
			goto unlock;
		i++;
	}

	/* Free up old hash. */
	oldoff = tdb->header.v.hash_off - sizeof(*r);
	r = tdb_get(tdb, oldoff, &pad, sizeof(*r));
	if (!r)
		goto unlock;
	add_free_record(tdb, oldoff,
			sizeof(*r)+rec_data_length(r)+rec_extra_padding(r));

	/* Now we write the modified header. */
	tdb->header.v.hash_bits++;
	tdb->header.v.hash_off = newoff;
	write_header(tdb);

unlock:
	tdb_allrecord_unlock(tdb, F_WRLCK);
}
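/*
 * Growth note (added commentary, not in the original source): each call
 * doubles the array (hlen = 2 * num * sizeof(tdb_off_t)) and bumps
 * v.hash_bits by one, so an entry's ideal bucket in the new table is its
 * hash masked with (2*num - 1): before linear probing, it either keeps
 * its old index or moves up by exactly num.
 */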
int tdb_store(struct tdb_context *tdb,
	      struct tdb_data key, struct tdb_data dbuf, int flag)
{
	tdb_off_t new_off, off, old_bucket, start, num_locks = 1;
	struct tdb_used_record rec;
	uint64_t h;
	bool growing = false;

	h = tdb_hash(tdb, key.dptr, key.dsize);

	/* FIXME: can we avoid locks for some fast paths? */
	start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
	if (start == TDB_OFF_ERR)
		return -1;

	/* Fast path. */
	old_bucket = start;
	off = entry_matches(tdb, start, h, &key, &rec);
	if (unlikely(off == TDB_OFF_ERR)) {
		/* Slow path, need to grab more locks and search. */
		tdb_off_t i;

		/* Warning: this may drop the lock!  Does that on error. */
		num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
		if (num_locks == TDB_OFF_ERR)
			return -1;

		for (i = start; i < start + num_locks; i++) {
			off = entry_matches(tdb, i, h, &key, &rec);
			/* Empty entry or we found it? */
			if (off == 0 || off != TDB_OFF_ERR)
				break;
		}
		if (i == start + num_locks)
			off = 0;

		/* Even if not found, this is where we put the new entry. */
		old_bucket = i;
	}

	/* Now we have lock on this hash bucket. */
	if (flag == TDB_INSERT) {
		if (off) {
			tdb->ecode = TDB_ERR_EXISTS;
			goto fail;
		}
	} else {
		if (off) {
			if (rec_data_length(&rec) + rec_extra_padding(&rec)
			    >= dbuf.dsize) {
				/* Enough room: update header in place, then
				 * fall through to write the data. */
				new_off = off;
				if (update_rec_hdr(tdb, off,
						   key.dsize, dbuf.dsize,
						   &rec, h))
					goto fail;
				goto write;
			}
			/* FIXME: See if right record is free? */
			/* Hint to allocator that we've realloced. */
			growing = true;
		} else {
			if (flag == TDB_MODIFY) {
				/* if the record doesn't exist and we
				   are in TDB_MODIFY mode then we should fail
				   the store */
				tdb->ecode = TDB_ERR_NOEXIST;
				goto fail;
			}
		}
	}

	/* Allocate a new record. */
	new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
	if (unlikely(new_off == TDB_OFF_ERR))
		goto fail;
	if (unlikely(new_off == 0)) {
		unlock_lists(tdb, start, num_locks, F_WRLCK);
		/* Expand, then try again... */
		if (tdb_expand(tdb, key.dsize, dbuf.dsize, growing) == -1)
			return -1;
		return tdb_store(tdb, key, dbuf, flag);
	}

	/* We didn't like the existing one: remove it. */
	if (off) {
		add_free_record(tdb, off, sizeof(struct tdb_used_record)
				+ rec_key_length(&rec)
				+ rec_data_length(&rec)
				+ rec_extra_padding(&rec));
	}

	/* FIXME: Encode extra hash bits! */
	if (tdb_write_off(tdb, hash_off(tdb, old_bucket), new_off) == -1)
		goto fail;

write:
	off = new_off + sizeof(struct tdb_used_record);
	if (tdb->methods->write(tdb, off, key.dptr, key.dsize) == -1)
		goto fail;
	off += key.dsize;
	if (tdb->methods->write(tdb, off, dbuf.dptr, dbuf.dsize) == -1)
		goto fail;

	/* FIXME: tdb_increment_seqnum(tdb); */
	unlock_lists(tdb, start, num_locks, F_WRLCK);
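	/*
	 * Added commentary (not in the original source): num_locks is the
	 * length of the contiguous occupied run that had to be locked, so
	 * it doubles as a cheap load-factor probe: long runs only appear
	 * when the table is filling up.  The heuristic below resizes off
	 * the run length alone, with no global entry count to maintain.
	 */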
	/* FIXME: by simple simulation, this approximated 60% full.
	 * Check in real case! */
	if (unlikely(num_locks > 4 * tdb->header.v.hash_bits - 30))
		enlarge_hash(tdb);

	return 0;

fail:
	unlock_lists(tdb, start, num_locks, F_WRLCK);
	return -1;
}
struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
{
	tdb_off_t off, start, num_locks = 1;
	struct tdb_used_record rec;
	struct tdb_data ret;
	uint64_t h;

	h = tdb_hash(tdb, key.dptr, key.dsize);

	/* FIXME: can we avoid locks for some fast paths? */
	start = tdb_lock_list(tdb, h, F_RDLCK, TDB_LOCK_WAIT);
	if (start == TDB_OFF_ERR)
		return tdb_null;

	/* Fast path. */
	off = entry_matches(tdb, start, h, &key, &rec);
	if (unlikely(off == TDB_OFF_ERR)) {
		/* Slow path, need to grab more locks and search. */
		tdb_off_t i;

		/* Warning: this may drop the lock!  Does that on error. */
		num_locks = relock_hash_to_zero(tdb, start, F_RDLCK);
		if (num_locks == TDB_OFF_ERR)
			return tdb_null;

		for (i = start; i < start + num_locks; i++) {
			off = entry_matches(tdb, i, h, &key, &rec);
			/* Empty entry or we found it? */
			if (off == 0 || off != TDB_OFF_ERR)
				break;
		}
		if (i == start + num_locks)
			off = 0;
	}

	if (!off) {
		unlock_lists(tdb, start, num_locks, F_RDLCK);
		tdb->ecode = TDB_ERR_NOEXIST;
		return tdb_null;
	}

	ret.dsize = rec_data_length(&rec);
	ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
				  ret.dsize);
	unlock_lists(tdb, start, num_locks, F_RDLCK);
	return ret;
}
static int hash_add(struct tdb_context *tdb, uint64_t h, tdb_off_t off)
{
	tdb_off_t i, hoff, len, num;

	/* Look for next space. */
	i = (h & ((1ULL << tdb->header.v.hash_bits) - 1));
	len = (1ULL << tdb->header.v.hash_bits) - i;
	num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);

	if (unlikely(num == len)) {
		/* We wrapped.  Look through start of hash table.  Restart
		 * the probe from bucket 0 so i + num below is correct. */
		i = 0;
		hoff = hash_off(tdb, 0);
		len = (1ULL << tdb->header.v.hash_bits);
		num = tdb_find_zero_off(tdb, hoff, len);
		if (num == len) {
			tdb->ecode = TDB_ERR_CORRUPT;
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "hash_add: full hash table!\n");
			return -1;
		}
	}

	/* FIXME: Encode extra hash bits! */
	return tdb_write_off(tdb, hash_off(tdb, i + num), off);
}
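/*
 * Added commentary (not in the original source): hash_add is the
 * re-insertion half of open-addressing deletion.  Zeroing a bucket can
 * break the probe chain of any entry between the deleted slot and the
 * next zero bucket, so tdb_delete below walks that span and re-adds every
 * entry that is not already in its ideal bucket.
 */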
int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
{
	tdb_off_t i, old_bucket, off, start, num_locks = 1;
	struct tdb_used_record rec;
	uint64_t h;

	h = tdb_hash(tdb, key.dptr, key.dsize);

	/* FIXME: can we avoid locks for some fast paths? */
	start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
	if (start == TDB_OFF_ERR)
		return -1;

	/* Fast path. */
	old_bucket = start;
	off = entry_matches(tdb, start, h, &key, &rec);
	if (off && off != TDB_OFF_ERR) {
		/* We can only really fastpath delete if next bucket
		 * is 0.  Note that we haven't locked it, but our lock
		 * on this bucket stops anyone overflowing into it
		 * while we look. */
		if (tdb_read_off(tdb, hash_off(tdb, h+1)) == 0)
			goto delete;
		/* Otherwise, take the slow path below. */
		off = TDB_OFF_ERR;
	}

	if (unlikely(off == TDB_OFF_ERR)) {
		/* Slow path, need to grab more locks and search. */

		/* Warning: this may drop the lock!  Does that on error. */
		num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
		if (num_locks == TDB_OFF_ERR)
			return -1;

		for (i = start; i < start + num_locks; i++) {
			off = entry_matches(tdb, i, h, &key, &rec);
			/* Empty entry or we found it? */
			if (off == 0 || off != TDB_OFF_ERR) {
				old_bucket = i;
				break;
			}
		}
		if (i == start + num_locks)
			off = 0;
	}

	if (!off) {
		unlock_lists(tdb, start, num_locks, F_WRLCK);
		tdb->ecode = TDB_ERR_NOEXIST;
		return -1;
	}

delete:
	/* This actually unlinks it. */
	if (tdb_write_off(tdb, hash_off(tdb, old_bucket), 0) == -1)
		goto unlock_err;

	/* Rehash anything following. */
	for (i = hash_off(tdb, old_bucket+1);
	     i != hash_off(tdb, h + num_locks);
	     i += sizeof(tdb_off_t)) {
		tdb_off_t off2;
		uint64_t h2;

		off2 = tdb_read_off(tdb, i);
		if (unlikely(off2 == TDB_OFF_ERR))
			goto unlock_err;
		/* The locked run ends with a zero bucket; nothing to
		 * rehash there. */
		if (!off2)
			continue;

		/* Maybe use a bit to indicate it is in ideal place? */
		h2 = hash_record(tdb, off2);
		/* Is it happy where it is? */
		if (hash_off(tdb, h2) == i)
			continue;

		/* Remove it, then re-add via linear probe. */
		if (tdb_write_off(tdb, i, 0) == -1)
			goto unlock_err;
		if (hash_add(tdb, h2, off2) == -1)
			goto unlock_err;
	}

	/* Free the deleted entry. */
	if (add_free_record(tdb, off,
			    sizeof(struct tdb_used_record)
			    + rec_key_length(&rec)
			    + rec_data_length(&rec)
			    + rec_extra_padding(&rec)) != 0)
		goto unlock_err;

	unlock_lists(tdb, start, num_locks, F_WRLCK);
	return 0;

unlock_err:
	unlock_lists(tdb, start, num_locks, F_WRLCK);
	return -1;
}
int tdb_close(struct tdb_context *tdb)
{
	struct tdb_context **i;
	int ret = 0;

	if (tdb->transaction) {
		tdb_transaction_cancel(tdb);
	}

	tdb_trace(tdb, "tdb_close");

	if (tdb->map_ptr) {
		if (tdb->flags & TDB_INTERNAL)
			free(tdb->map_ptr);
		else
			tdb_munmap(tdb);
	}
	free((char *)tdb->name);
	if (tdb->fd != -1) {
		ret = close(tdb->fd);
		tdb->fd = -1;
	}

	/* Remove from contexts list */
	for (i = &tdbs; *i; i = &(*i)->next) {
		if (*i == tdb) {
			*i = tdb->next;
			break;
		}
	}

	free(tdb);
	return ret;
}

enum TDB_ERROR tdb_error(struct tdb_context *tdb)
{
	return tdb->ecode;
}