#include <ccan/tdb2/tdb2.h>
#include <ccan/hash/hash.h>
#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>

struct tdb_data tdb_null = { .dptr = NULL, .dsize = 0 };
/* All contexts, to ensure no double-opens (fcntl locks don't nest!) */
static struct tdb_context *tdbs = NULL;

PRINTF_ATTRIBUTE(4, 5) static void
null_log_fn(struct tdb_context *tdb,
	    enum tdb_debug_level level, void *priv,
/* We do a lot of work assuming our copy of the header volatile area
 * is up to date, and usually it is.  However, once we grab a lock, we have to
 * re-check it. */
bool header_changed(struct tdb_context *tdb)

	if (!(tdb->flags & TDB_NOLOCK) && tdb->header_uptodate) {
		tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
			 "warning: header already up to date\n");

	/* We could get a partial update if we're not holding any locks. */
	assert((tdb->flags & TDB_NOLOCK) || tdb_has_locks(tdb));

	tdb->header_uptodate = true;
	gen = tdb_read_off(tdb, offsetof(struct tdb_header, v.generation));
	if (unlikely(gen != tdb->header.v.generation)) {
		tdb_read_convert(tdb, offsetof(struct tdb_header, v),
				 &tdb->header.v, sizeof(tdb->header.v));
int write_header(struct tdb_context *tdb)

	assert(tdb_read_off(tdb, offsetof(struct tdb_header, v.generation))
	       == tdb->header.v.generation);
	tdb->header.v.generation++;
	return tdb_write_convert(tdb, offsetof(struct tdb_header, v),
				 &tdb->header.v, sizeof(tdb->header.v));
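
/*
 * A minimal usage sketch (not part of this file): the volatile header
 * is refreshed under a lock via header_changed(), then republished
 * with write_header(), which bumps the generation so other processes
 * notice.  The lock helpers named here are illustrative only:
 *
 *	grab_some_lock(tdb);
 *	if (header_changed(tdb)) {
 *		... our cached view was stale; re-derive anything
 *		    computed from tdb->header.v ...
 *	}
 *	... modify tdb->header.v fields ...
 *	if (write_header(tdb) == -1)
 *		... handle I/O error ...
 *	drop_some_lock(tdb);
 */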
static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,

	return hash64_stable((const unsigned char *)key, length, seed);

uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)

	return tdb->khash(ptr, len, tdb->header.hash_seed, tdb->hash_priv);
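
/*
 * Illustrative sketch (not part of this file): the Jenkins hash above
 * is only the default; a caller can swap in its own function through a
 * TDB_ATTRIBUTE_HASH attribute at open time.  "my_hash" and "my_priv"
 * are hypothetical:
 *
 *	union tdb_attribute hattr;
 *
 *	hattr.base.attr = TDB_ATTRIBUTE_HASH;
 *	hattr.base.next = NULL;
 *	hattr.hash.hash_fn = my_hash;
 *	hattr.hash.hash_private = my_priv;
 *	tdb = tdb_open("example.tdb", 0, O_RDWR|O_CREAT, 0600, &hattr);
 */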
static bool tdb_already_open(dev_t device, ino_t ino)

	struct tdb_context *i;

	for (i = tdbs; i; i = i->next) {
		if (i->device == device && i->inode == ino) {
static uint64_t random_number(struct tdb_context *tdb)

	fd = open("/dev/urandom", O_RDONLY);

	if (tdb_read_all(fd, &ret, sizeof(ret))) {
		tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
			 "tdb_open: random from /dev/urandom\n");

	/* FIXME: Untested!  Based on Wikipedia protocol description! */
	fd = open("/dev/egd-pool", O_RDWR);

		/* Command is 1, next byte is size we want to read. */
		char cmd[2] = { 1, sizeof(uint64_t) };
		if (write(fd, cmd, sizeof(cmd)) == sizeof(cmd)) {
			char reply[1 + sizeof(uint64_t)];
			int r = read(fd, reply, sizeof(reply));

				tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
					 "tdb_open: %u random bytes from"
					 " /dev/egd-pool\n", r-1);
				/* Copy at least some bytes. */
				memcpy(&ret, reply+1, r - 1);
				if (reply[0] == sizeof(uint64_t)
				    && r == sizeof(reply)) {

	/* Fallback: pid and time. */
	gettimeofday(&now, NULL);
	ret = getpid() * 100132289ULL + now.tv_sec * 1000000ULL + now.tv_usec;
	tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
		 "tdb_open: random from getpid and time\n");
	struct tdb_header hdr;
	struct free_zone_header zhdr;
	tdb_off_t free[BUCKETS_FOR_ZONE(INITIAL_ZONE_BITS) + 1];
	struct tdb_used_record hrec;
	tdb_off_t hash[1ULL << INITIAL_HASH_BITS];
	struct tdb_free_record frec;

struct new_database {
	struct new_db_head h;
	/* Rest up to 1 << INITIAL_ZONE_BITS is empty. */
	char space[(1 << INITIAL_ZONE_BITS)
		   - (sizeof(struct new_db_head) - sizeof(struct tdb_header))];
	/* Don't count final padding! */

/* initialise a new database */
static int tdb_new_database(struct tdb_context *tdb)

	/* We make it up in memory, then write it out if not internal */
	struct new_database newdb;
	unsigned int bucket, magic_off, dbsize;

	/* Don't want any extra padding! */
	dbsize = offsetof(struct new_database, tailer) + sizeof(newdb.tailer);

	/* Fill in the header */
	newdb.h.hdr.version = TDB_VERSION;
	newdb.h.hdr.hash_seed = random_number(tdb);
	newdb.h.hdr.hash_test = TDB_HASH_MAGIC;
	newdb.h.hdr.hash_test = tdb->khash(&newdb.h.hdr.hash_test,
					   sizeof(newdb.h.hdr.hash_test),
					   newdb.h.hdr.hash_seed,
	memset(newdb.h.hdr.reserved, 0, sizeof(newdb.h.hdr.reserved));
	newdb.h.hdr.v.generation = 0;
	/* Initial hashes are empty. */
	newdb.h.hdr.v.hash_bits = INITIAL_HASH_BITS;
	newdb.h.hdr.v.hash_off = offsetof(struct new_database, h.hash);
	set_header(tdb, &newdb.h.hrec, 0,
		   sizeof(newdb.h.hash), sizeof(newdb.h.hash), 0,
	memset(newdb.h.hash, 0, sizeof(newdb.h.hash));

	/* Create the single free entry. */
	newdb.h.frec.magic_and_meta = TDB_FREE_MAGIC | INITIAL_ZONE_BITS;
	newdb.h.frec.data_len = (sizeof(newdb.h.frec)
				 - sizeof(struct tdb_used_record)
				 + sizeof(newdb.space));

	/* The free[] array is mostly empty... */
	newdb.h.zhdr.zone_bits = INITIAL_ZONE_BITS;
	memset(newdb.h.free, 0, sizeof(newdb.h.free));

	/* ... except for this one bucket. */
	bucket = size_to_bucket(INITIAL_ZONE_BITS, newdb.h.frec.data_len);
	newdb.h.free[bucket] = offsetof(struct new_database, h.frec);
	newdb.h.frec.next = newdb.h.frec.prev = 0;

	/* Tailer contains maximum number of free_zone bits. */
	newdb.tailer = INITIAL_ZONE_BITS;

	memset(newdb.h.hdr.magic_food, 0, sizeof(newdb.h.hdr.magic_food));
	strcpy(newdb.h.hdr.magic_food, TDB_MAGIC_FOOD);

	/* This creates an endian-converted database, as if read from disk */
	magic_off = offsetof(struct tdb_header, magic_food);
		    (char *)&newdb.h.hdr + magic_off,
		    dbsize - 1 - magic_off);

	tdb->header = newdb.h.hdr;

	if (tdb->flags & TDB_INTERNAL) {
		tdb->map_size = dbsize;
		tdb->map_ptr = malloc(tdb->map_size);

			tdb->ecode = TDB_ERR_OOM;

		memcpy(tdb->map_ptr, &newdb, tdb->map_size);

	if (lseek(tdb->fd, 0, SEEK_SET) == -1)

	if (ftruncate(tdb->fd, 0) == -1)

	if (!tdb_pwrite_all(tdb->fd, &newdb, dbsize, 0)) {
		tdb->ecode = TDB_ERR_IO;
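
/*
 * Illustrative sketch (not part of this file): a typical open of an
 * on-disk database, creating it if absent.  On failure tdb_open()
 * returns NULL; details are reported through the log attribute, if
 * one was supplied:
 *
 *	struct tdb_context *tdb;
 *
 *	tdb = tdb_open("example.tdb", 0, O_RDWR|O_CREAT, 0600, NULL);
 *	if (!tdb)
 *		... open failed, errno is set ...
 */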
struct tdb_context *tdb_open(const char *name, int tdb_flags,
			     int open_flags, mode_t mode,
			     union tdb_attribute *attr)

	struct tdb_context *tdb;

	tdb = malloc(sizeof(*tdb));

	tdb->map_size = sizeof(struct tdb_header);
	tdb->ecode = TDB_SUCCESS;
	/* header will be read in below. */
	tdb->header_uptodate = false;
	tdb->flags = tdb_flags;
	tdb->log = null_log_fn;
	tdb->log_priv = NULL;
	tdb->khash = jenkins_hash;
	tdb->hash_priv = NULL;
	tdb->transaction = NULL;
	/* last_zone will be set below. */

		switch (attr->base.attr) {
		case TDB_ATTRIBUTE_LOG:
			tdb->log = attr->log.log_fn;
			tdb->log_priv = attr->log.log_private;
		case TDB_ATTRIBUTE_HASH:
			tdb->khash = attr->hash.hash_fn;
			tdb->hash_priv = attr->hash.hash_private;

			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: unknown attribute type %u\n",

		attr = attr->base.next;
	if ((open_flags & O_ACCMODE) == O_WRONLY) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: can't open tdb %s write-only\n", name);

	if ((open_flags & O_ACCMODE) == O_RDONLY) {
		tdb->read_only = true;
		/* read only databases don't do locking */
		tdb->flags |= TDB_NOLOCK;

		tdb->read_only = false;

	/* internal databases don't need any of the rest. */
	if (tdb->flags & TDB_INTERNAL) {
		tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
		if (tdb_new_database(tdb) != 0) {
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: tdb_new_database failed!");

		TEST_IT(tdb->flags & TDB_CONVERT);
		tdb_convert(tdb, &tdb->header, sizeof(tdb->header));

	if ((tdb->fd = open(name, open_flags, mode)) == -1) {
		tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
			 "tdb_open: could not open file %s: %s\n",
			 name, strerror(errno));
		goto fail;	/* errno set by open(2) */

	/* on exec, don't inherit the fd */
	v = fcntl(tdb->fd, F_GETFD, 0);
	fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC);

	/* ensure there is only one process initialising at once */
	if (tdb_lock_open(tdb) == -1) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: failed to get open lock on %s: %s\n",
			 name, strerror(errno));
		goto fail;	/* errno set by tdb_brlock */

	if (!tdb_pread_all(tdb->fd, &tdb->header, sizeof(tdb->header), 0)
	    || strcmp(tdb->header.magic_food, TDB_MAGIC_FOOD) != 0) {
		if (!(open_flags & O_CREAT) || tdb_new_database(tdb) == -1) {
			errno = EIO; /* i.e. bad format or something */
	} else if (tdb->header.version != TDB_VERSION) {
		if (tdb->header.version == bswap_64(TDB_VERSION))
			tdb->flags |= TDB_CONVERT;

			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: %s is unknown version 0x%llx\n",
				 name, (unsigned long long)tdb->header.version);
	tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
	hash_test = TDB_HASH_MAGIC;
	hash_test = tdb->khash(&hash_test, sizeof(hash_test),
			       tdb->header.hash_seed, tdb->hash_priv);
	if (tdb->header.hash_test != hash_test) {
		/* wrong hash variant */
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: %s uses a different hash function\n",

	if (fstat(tdb->fd, &st) == -1)

	/* Is it already in the open list?  If so, fail. */
	if (tdb_already_open(st.st_dev, st.st_ino)) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: %s (%d,%d) is already open in this process\n",
			 name, (int)st.st_dev, (int)st.st_ino);

	tdb->name = strdup(name);

	tdb->device = st.st_dev;
	tdb->inode = st.st_ino;
	tdb_unlock_open(tdb);
	/* This makes sure we have current map_size and mmap. */
	tdb->methods->oob(tdb, tdb->map_size + 1, true);
	/* Now we can pick a random free zone to start from. */
	if (tdb_zone_init(tdb) == -1)

	if (tdb->flags & TDB_INTERNAL) {

	free((char *)tdb->name);

	if (close(tdb->fd) != 0)
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: failed to close tdb->fd"
tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)

	return tdb->header.v.hash_off
	       + ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
		  * sizeof(tdb_off_t));
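
/*
 * Worked example (assuming hash_bits == 10 and an 8-byte tdb_off_t):
 * bucket numbers wrap at 1 << 10, so list 1027 masks to bucket 3,
 * giving hash_off + 3 * sizeof(tdb_off_t) = hash_off + 24.  This
 * wrap-around is why callers can iterate past the table end and let
 * hash_off() do the modulo for them.
 */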
/* Returns 0 if the entry is a zero (definitely not a match).
 * Returns a valid entry offset if it's a match.  Fills in rec.
 * Otherwise returns TDB_OFF_ERR: keep searching. */
static tdb_off_t entry_matches(struct tdb_context *tdb,
			       const struct tdb_data *key,
			       struct tdb_used_record *rec)

	const unsigned char *rkey;

	list &= ((1ULL << tdb->header.v.hash_bits) - 1);

	off = tdb_read_off(tdb, tdb->header.v.hash_off
			   + list * sizeof(tdb_off_t));
	if (off == 0 || off == TDB_OFF_ERR)

#if 0	/* FIXME: Check other bits. */
	unsigned int bits, bitmask, hoffextra;
	/* Bottom three bits show how many extra hash bits. */
	bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1;
	bitmask = (1 << bits) - 1;
	hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask);
	uint64_t hextra = hash >> tdb->header.v.hash_bits;
	if ((hextra & bitmask) != hoffextra)

	if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)

	/* FIXME: check extra bits in header! */
	keylen = rec_key_length(rec);
	if (keylen != key->dsize)

	rkey = tdb_access_read(tdb, off + sizeof(*rec), keylen, false);

	if (memcmp(rkey, key->dptr, keylen) != 0)

	tdb_access_release(tdb, rkey);
/* FIXME: Optimize? */
static void unlock_lists(struct tdb_context *tdb,
			 tdb_off_t list, tdb_len_t num,

	for (i = list; i < list + num; i++)
		tdb_unlock_list(tdb, i, ltype);

/* FIXME: Optimize? */
static int lock_lists(struct tdb_context *tdb,
		      tdb_off_t list, tdb_len_t num,

	for (i = list; i < list + num; i++) {
		if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT)
			unlock_lists(tdb, list, i - list, ltype);
/* We lock hashes up to the next empty offset.  We already hold the
 * lock on the start bucket, but we may need to release and re-grab
 * it.  If we fail, we hold no locks at all! */
static tdb_len_t relock_hash_to_zero(struct tdb_context *tdb,
				     tdb_off_t start, int ltype)

	num = 1ULL << tdb->header.v.hash_bits;
	len = tdb_find_zero_off(tdb, hash_off(tdb, start), num - start);
	if (unlikely(len == num - start)) {
		/* We hit the end of the hash range.  Drop lock: we have
		   to lock start of hash first. */

		tdb_unlock_list(tdb, start, ltype);

		/* Grab something, so header is stable. */
		if (tdb_lock_list(tdb, 0, ltype, TDB_LOCK_WAIT))

		pre_locks = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
		/* We want to lock the zero entry as well. */

		if (lock_lists(tdb, 1, pre_locks - 1, ltype) == -1) {
			tdb_unlock_list(tdb, 0, ltype);

		/* Now lock later ones. */
		if (unlikely(lock_lists(tdb, start, len, ltype) == -1)) {
			unlock_lists(tdb, 0, pre_locks, ltype);

		/* We want to lock the zero entry as well. */

		/* But we already have lock on start. */
		if (unlikely(lock_lists(tdb, start+1, len-1, ltype) == -1)) {
			tdb_unlock_list(tdb, start, ltype);

	/* Now, did we lose the race, and it's not zero any more? */
	if (unlikely(tdb_read_off(tdb, hash_off(tdb, start + len - 1)) != 0)) {
		/* Leave the start locked, as expected. */
		unlock_lists(tdb, start + 1, len - 1, ltype);
/* FIXME: modify, don't rewrite! */
static int update_rec_hdr(struct tdb_context *tdb,
			  struct tdb_used_record *rec,

	uint64_t dataroom = rec_data_length(rec) + rec_extra_padding(rec);

	if (set_header(tdb, rec, keylen, datalen, keylen + dataroom, h,

	return tdb_write_convert(tdb, off, rec, sizeof(*rec));
static int hash_add(struct tdb_context *tdb,
		    uint64_t hash, tdb_off_t off)

	tdb_off_t i, hoff, len, num;

	/* Look for next space. */
	i = (hash & ((1ULL << tdb->header.v.hash_bits) - 1));
	len = (1ULL << tdb->header.v.hash_bits) - i;
	num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);

	if (unlikely(num == len)) {
		/* We wrapped.  Look through start of hash table. */
		hoff = hash_off(tdb, 0);
		len = (1ULL << tdb->header.v.hash_bits);
		num = tdb_find_zero_off(tdb, hoff, len);

			tdb->ecode = TDB_ERR_CORRUPT;
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "hash_add: full hash table!\n");

	if (tdb_read_off(tdb, hash_off(tdb, i + num)) != 0) {
		tdb->ecode = TDB_ERR_CORRUPT;
		tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
			 "hash_add: overwriting hash table?\n");

	/* FIXME: Encode extra hash bits! */
	return tdb_write_off(tdb, hash_off(tdb, i + num), off);
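
/*
 * Illustrative example of the probing above: with hash_bits == 3 and
 * buckets [A, 0, C, D, 0, 0, 0, 0], a record whose hash maps to
 * bucket 2 scans past C and D, then lands in bucket 4, the first
 * zero.  Only if the scan to the table end and the rescan from bucket
 * 0 both find no zero is the table declared full (corrupt, since
 * enlarge_hash() should have run long before that point).
 */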
/* If we fail, others will try after us. */
static void enlarge_hash(struct tdb_context *tdb)

	tdb_off_t newoff, oldoff, i;
	uint64_t num = 1ULL << tdb->header.v.hash_bits;
	struct tdb_used_record pad, *r;
	unsigned int records = 0;

	/* FIXME: We should do this without holding locks throughout. */
	if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)

	/* Someone else enlarged for us?  Nothing to do. */
	if ((1ULL << tdb->header.v.hash_bits) != num)

	/* Allocate our new array. */
	hlen = num * sizeof(tdb_off_t) * 2;
	newoff = alloc(tdb, 0, hlen, 0, false);
	if (unlikely(newoff == TDB_OFF_ERR))

	if (unlikely(newoff == 0)) {
		if (tdb_expand(tdb, 0, hlen, false) == -1)

	/* Step over record header! */
	newoff += sizeof(struct tdb_used_record);

	/* Starts all zero. */
	if (zero_out(tdb, newoff, hlen) == -1)

	/* Update header now so we can use normal routines. */
	oldoff = tdb->header.v.hash_off;

	tdb->header.v.hash_bits++;
	tdb->header.v.hash_off = newoff;

	/* FIXME: If the space before is empty, we know this is in its ideal
	 * location.  Or steal a bit from the pointer to avoid rehash. */
	for (i = 0; i < num; i++) {

		off = tdb_read_off(tdb, oldoff + i * sizeof(tdb_off_t));
		if (unlikely(off == TDB_OFF_ERR))

		if (off && hash_add(tdb, hash_record(tdb, off), off) == -1)

	tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
		 "enlarge_hash: moved %u records from %llu buckets.\n",
		 records, (unsigned long long)num);

	/* Free up old hash. */
	r = tdb_get(tdb, oldoff - sizeof(*r), &pad, sizeof(*r));

	add_free_record(tdb, rec_zone_bits(r), oldoff - sizeof(*r),
			sizeof(*r) + rec_data_length(r) + rec_extra_padding(r));

	/* Now we write the modified header. */

	tdb_allrecord_unlock(tdb, F_WRLCK);

	tdb->header.v.hash_bits--;
	tdb->header.v.hash_off = oldoff;
/* This is the slow version of the routine which searches the
 * hashtable for an entry.
 * We lock every hash bucket up to and including the next zero one.
 */
static tdb_off_t find_and_lock_slow(struct tdb_context *tdb,
				    tdb_off_t *start_lock,
				    tdb_len_t *num_locks,
				    struct tdb_used_record *rec)

	/* Warning: this may drop the lock on *bucket! */
	*num_locks = relock_hash_to_zero(tdb, *start_lock, ltype);
	if (*num_locks == TDB_OFF_ERR)

	for (*bucket = *start_lock;
	     *bucket < *start_lock + *num_locks;

		tdb_off_t off = entry_matches(tdb, *bucket, h, &key, rec);
		/* Empty entry or we found it? */
		if (off == 0 || off != TDB_OFF_ERR)

	/* We didn't find a zero entry?  Something went badly wrong... */
	unlock_lists(tdb, *start_lock, *num_locks, ltype);
	tdb->ecode = TDB_ERR_CORRUPT;
	tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
		 "find_and_lock: expected to find an empty hash bucket!\n");
/* This is the core routine which searches the hashtable for an entry.
 * On error, no locks are held and TDB_OFF_ERR is returned.
 * Otherwise, *num_locks locks of type ltype from *start_lock are held.
 * The bucket where the entry is (or would be) is in *bucket.
 * If not found, the return value is 0.
 * If found, the return value is the offset, and *rec is the record. */
static tdb_off_t find_and_lock(struct tdb_context *tdb,
			       tdb_off_t *start_lock,
			       tdb_len_t *num_locks,
			       struct tdb_used_record *rec)

	/* FIXME: can we avoid locks for some fast paths? */
	*start_lock = tdb_lock_list(tdb, h, ltype, TDB_LOCK_WAIT);
	if (*start_lock == TDB_OFF_ERR)

	off = entry_matches(tdb, *start_lock, h, &key, rec);
	if (likely(off != TDB_OFF_ERR)) {
		*bucket = *start_lock;

	/* Slow path, need to grab more locks and search. */
	return find_and_lock_slow(tdb, key, h, ltype, start_lock, num_locks,
/* Returns -1 on error, 0 on OK, 1 on "expand and retry". */
static int replace_data(struct tdb_context *tdb,
			uint64_t h, struct tdb_data key, struct tdb_data dbuf,
			tdb_off_t old_off, tdb_len_t old_room,

	/* Allocate a new record. */
	new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
	if (unlikely(new_off == TDB_OFF_ERR))

	if (unlikely(new_off == 0))

	/* We didn't like the existing one: remove it. */
		add_free_record(tdb, old_zone, old_off,
				sizeof(struct tdb_used_record)
				+ key.dsize + old_room);

	/* FIXME: Encode extra hash bits! */
	if (tdb_write_off(tdb, hash_off(tdb, bucket), new_off) == -1)

	new_off += sizeof(struct tdb_used_record);
	if (tdb->methods->write(tdb, new_off, key.dptr, key.dsize) == -1)

	new_off += key.dsize;
	if (tdb->methods->write(tdb, new_off, dbuf.dptr, dbuf.dsize) == -1)

	/* FIXME: tdb_increment_seqnum(tdb); */
int tdb_store(struct tdb_context *tdb,
	      struct tdb_data key, struct tdb_data dbuf, int flag)

	tdb_off_t off, bucket, start, num;
	tdb_len_t old_room = 0;
	struct tdb_used_record rec;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
	if (unlikely(off == TDB_OFF_ERR))

	/* Now we have lock on this hash bucket. */
	if (flag == TDB_INSERT) {

			tdb->ecode = TDB_ERR_EXISTS;

			old_room = rec_data_length(&rec)
				+ rec_extra_padding(&rec);
			if (old_room >= dbuf.dsize) {
				/* Can modify in-place.  Easy! */
				if (update_rec_hdr(tdb, off,
						   key.dsize, dbuf.dsize,

				if (tdb->methods->write(tdb, off + sizeof(rec)
							dbuf.dptr, dbuf.dsize))

				unlock_lists(tdb, start, num, F_WRLCK);

			/* FIXME: See if right record is free? */

		if (flag == TDB_MODIFY) {
			/* if the record doesn't exist and we
			   are in TDB_MODIFY mode then we should fail
			   the store. */
			tdb->ecode = TDB_ERR_NOEXIST;

	/* If we didn't use the old record, this implies we're growing. */
	ret = replace_data(tdb, h, key, dbuf, bucket, off, old_room,
			   rec_zone_bits(&rec), off != 0);
	unlock_lists(tdb, start, num, F_WRLCK);

	if (unlikely(ret == 1)) {
		/* Expand, then try again... */
		if (tdb_expand(tdb, key.dsize, dbuf.dsize, off != 0) == -1)

		return tdb_store(tdb, key, dbuf, flag);

	/* FIXME: by simple simulation, this approximated 60% full.
	 * Check in real case! */
	if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))

	unlock_lists(tdb, start, num, F_WRLCK);
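
/*
 * Illustrative sketch (not part of this file): inserting a new key,
 * failing if it already exists.  The dptr casts assume the usual
 * unsigned char pointer in struct tdb_data:
 *
 *	struct tdb_data k = { .dptr = (unsigned char *)"hello", .dsize = 5 };
 *	struct tdb_data v = { .dptr = (unsigned char *)"world", .dsize = 5 };
 *
 *	if (tdb_store(tdb, k, v, TDB_INSERT) != 0)
 *		... tdb_error(tdb) is TDB_ERR_EXISTS if k was present ...
 */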
int tdb_append(struct tdb_context *tdb,
	       struct tdb_data key, struct tdb_data dbuf)

	tdb_off_t off, bucket, start, num;
	struct tdb_used_record rec;
	tdb_len_t old_room = 0, old_dlen;

	unsigned char *newdata;
	struct tdb_data new_dbuf;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
	if (unlikely(off == TDB_OFF_ERR))

		old_dlen = rec_data_length(&rec);
		old_room = old_dlen + rec_extra_padding(&rec);

		/* Fast path: can append in place. */
		if (rec_extra_padding(&rec) >= dbuf.dsize) {
			if (update_rec_hdr(tdb, off, key.dsize,
					   old_dlen + dbuf.dsize, &rec, h))

			off += sizeof(rec) + key.dsize + old_dlen;
			if (tdb->methods->write(tdb, off, dbuf.dptr,

			/* FIXME: tdb_increment_seqnum(tdb); */
			unlock_lists(tdb, start, num, F_WRLCK);

		/* FIXME: Check right record free? */

		newdata = malloc(key.dsize + old_dlen + dbuf.dsize);

			tdb->ecode = TDB_ERR_OOM;
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "tdb_append: cannot allocate %llu bytes!\n",
				 (unsigned long long)(key.dsize + old_dlen
						      + dbuf.dsize));

		if (tdb->methods->read(tdb, off + sizeof(rec) + key.dsize,
				       newdata, old_dlen) != 0) {

		memcpy(newdata + old_dlen, dbuf.dptr, dbuf.dsize);
		new_dbuf.dptr = newdata;
		new_dbuf.dsize = old_dlen + dbuf.dsize;

	/* If they're using tdb_append(), it implies they're growing the
	 * record. */
	ret = replace_data(tdb, h, key, new_dbuf, bucket, off, old_room,
			   rec_zone_bits(&rec), true);
	unlock_lists(tdb, start, num, F_WRLCK);

	if (unlikely(ret == 1)) {
		/* Expand, then try again. */
		if (tdb_expand(tdb, key.dsize, dbuf.dsize, true) == -1)

		return tdb_append(tdb, key, dbuf);

	/* FIXME: by simple simulation, this approximated 60% full.
	 * Check in real case! */
	if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))

	unlock_lists(tdb, start, num, F_WRLCK);
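
/*
 * Illustrative sketch (not part of this file): appending extends an
 * existing record (in place when the padding allows, as above), and
 * otherwise goes through replace_data(), which also creates the
 * record when the key is absent:
 *
 *	struct tdb_data k = { .dptr = (unsigned char *)"log", .dsize = 3 };
 *	struct tdb_data d = { .dptr = (unsigned char *)"entry;", .dsize = 6 };
 *
 *	if (tdb_append(tdb, k, d) != 0)
 *		... see tdb_error(tdb) ...
 */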
struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)

	tdb_off_t off, start, num, bucket;
	struct tdb_used_record rec;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	off = find_and_lock(tdb, key, h, F_RDLCK, &start, &num, &bucket, &rec);
	if (unlikely(off == TDB_OFF_ERR))

		tdb->ecode = TDB_ERR_NOEXIST;

		ret.dsize = rec_data_length(&rec);
		ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,

	unlock_lists(tdb, start, num, F_RDLCK);
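
/*
 * Illustrative sketch (not part of this file): fetching a value.  The
 * returned dptr is allocated for the caller, who must free() it;
 * tdb_null (a NULL dptr) means not found or error:
 *
 *	struct tdb_data val = tdb_fetch(tdb, k);
 *
 *	if (!val.dptr)
 *		... not found: tdb_error(tdb) is TDB_ERR_NOEXIST ...
 *	else {
 *		... use val.dptr, val.dsize ...
 *		free(val.dptr);
 *	}
 */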
int tdb_delete(struct tdb_context *tdb, struct tdb_data key)

	tdb_off_t i, bucket, off, start, num;
	struct tdb_used_record rec;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
	if (unlikely(start == TDB_OFF_ERR))

	/* FIXME: Fastpath: if next is zero, we can delete without lock,
	 * since this lock protects us. */
	off = find_and_lock_slow(tdb, key, h, F_WRLCK,
				 &start, &num, &bucket, &rec);
	if (unlikely(off == TDB_OFF_ERR))

		/* FIXME: We could optimize not found case if it mattered, by
		 * reading offset after first lock: if it's zero, goto here. */
		unlock_lists(tdb, start, num, F_WRLCK);
		tdb->ecode = TDB_ERR_NOEXIST;

	/* Since we found the entry, we must have locked it and a zero. */

	/* This actually unlinks it. */
	if (tdb_write_off(tdb, hash_off(tdb, bucket), 0) == -1)

	/* Rehash anything following. */
	for (i = bucket+1; i != bucket + num - 1; i++) {
		tdb_off_t hoff, off2;

		hoff = hash_off(tdb, i);
		off2 = tdb_read_off(tdb, hoff);
		if (unlikely(off2 == TDB_OFF_ERR))

		/* This can happen if we raced. */
		if (unlikely(off2 == 0))

		/* Maybe use a bit to indicate it is in ideal place? */
		h2 = hash_record(tdb, off2);
		/* Is it happy where it is? */
		if (hash_off(tdb, h2) == hoff)

		if (tdb_write_off(tdb, hoff, 0) == -1)

		if (hash_add(tdb, h2, off2) == -1)

	/* Free the deleted entry. */
	if (add_free_record(tdb, rec_zone_bits(&rec), off,
			    sizeof(struct tdb_used_record)
			    + rec_key_length(&rec)
			    + rec_data_length(&rec)
			    + rec_extra_padding(&rec)) != 0)

	unlock_lists(tdb, start, num, F_WRLCK);

	unlock_lists(tdb, start, num, F_WRLCK);
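
/*
 * Illustrative sketch (not part of this file): deleting a key.  A
 * missing key makes tdb_delete() fail with TDB_ERR_NOEXIST:
 *
 *	if (tdb_delete(tdb, k) != 0) {
 *		if (tdb_error(tdb) == TDB_ERR_NOEXIST)
 *			... key was not present ...
 *		else
 *			... I/O or corruption error ...
 *	}
 */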
int tdb_close(struct tdb_context *tdb)

	struct tdb_context **i;

	if (tdb->transaction) {
		tdb_transaction_cancel(tdb);

	tdb_trace(tdb, "tdb_close");

	if (tdb->flags & TDB_INTERNAL)

	free((char *)tdb->name);
	if (tdb->fd != -1) {
		ret = close(tdb->fd);

	free(tdb->lockrecs);

	/* Remove from contexts list */
	for (i = &tdbs; *i; i = &(*i)->next) {

	close(tdb->tracefd);

enum TDB_ERROR tdb_error(struct tdb_context *tdb)