#include <ccan/tdb2/tdb2.h>
#include <ccan/hash/hash.h>
#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>

struct tdb_data tdb_null = { .dptr = NULL, .dsize = 0 };

/* all contexts, to ensure no double-opens (fcntl locks don't nest!) */
static struct tdb_context *tdbs = NULL;

PRINTF_ATTRIBUTE(4, 5) static void
null_log_fn(struct tdb_context *tdb,
	    enum tdb_debug_level level, void *priv,
	    const char *fmt, ...)
{
}
/* We do a lot of work assuming our copy of the header volatile area
 * is uptodate, and usually it is.  However, once we grab a lock, we have to
 * re-check it. */
bool header_changed(struct tdb_context *tdb)
{
	uint64_t gen;

	if (!(tdb->flags & TDB_NOLOCK) && tdb->header_uptodate) {
		tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
			 "warning: header uptodate already\n");
	}

	/* We could get a partial update if we're not holding any locks. */
	assert((tdb->flags & TDB_NOLOCK) || tdb_has_locks(tdb));

	tdb->header_uptodate = true;
	gen = tdb_read_off(tdb, offsetof(struct tdb_header, v.generation));
	if (unlikely(gen != tdb->header.v.generation)) {
		tdb_read_convert(tdb, offsetof(struct tdb_header, v),
				 &tdb->header.v, sizeof(tdb->header.v));
		return true;
	}
	return false;
}

int write_header(struct tdb_context *tdb)
{
	assert(tdb_read_off(tdb, offsetof(struct tdb_header, v.generation))
	       == tdb->header.v.generation);
	tdb->header.v.generation++;
	return tdb_write_convert(tdb, offsetof(struct tdb_header, v),
				 &tdb->header.v, sizeof(tdb->header.v));
}
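
/*
 * Illustrative sketch (not from the original file): the intended pairing of
 * header_changed() and write_header() under a lock.  The function name
 * example_bump_generation and the TDB_DOC_EXAMPLES guard are hypothetical.
 */
#ifdef TDB_DOC_EXAMPLES
static int example_bump_generation(struct tdb_context *tdb)
{
	/* With a lock held, re-check the cached volatile header area. */
	if (header_changed(tdb)) {
		/* Cache was stale; header_changed() already re-read it. */
	}
	/* ... modify tdb->header.v here ... */
	/* Publish the change with a new generation number. */
	return write_header(tdb);
}
#endif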
static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
			     void *arg)
{
	return hash64_stable((const unsigned char *)key, length, seed);
}

uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
{
	return tdb->khash(ptr, len, tdb->header.hash_seed, tdb->hash_priv);
}
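
/*
 * Sketch (assumption, not from the original file): a replacement hash must
 * match the jenkins_hash() signature above, since tdb_hash() dispatches
 * through tdb->khash.  The deliberately simple example_djb_hash below shows
 * only the shape; it is not a recommendation.
 */
#ifdef TDB_DOC_EXAMPLES
static uint64_t example_djb_hash(const void *key, size_t length,
				 uint64_t seed, void *priv)
{
	const unsigned char *p = key;
	uint64_t h = seed ^ 5381;
	size_t i;

	for (i = 0; i < length; i++)
		h = h * 33 + p[i];
	return h;
}
#endif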
static bool tdb_already_open(dev_t device, ino_t ino)
{
	struct tdb_context *i;

	for (i = tdbs; i; i = i->next) {
		if (i->device == device && i->inode == ino) {
			return true;
		}
	}
	return false;
}
static uint64_t random_number(struct tdb_context *tdb)
{
	uint64_t ret;
	struct timeval now;
	int fd;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		if (tdb_read_all(fd, &ret, sizeof(ret))) {
			tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
				 "tdb_open: random from /dev/urandom\n");
			close(fd);
			return ret;
		}
		close(fd);
	}
	/* FIXME: Untested!  Based on Wikipedia protocol description! */
	fd = open("/dev/egd-pool", O_RDWR);
	if (fd >= 0) {
		/* Command is 1, next byte is size we want to read. */
		char cmd[2] = { 1, sizeof(uint64_t) };
		if (write(fd, cmd, sizeof(cmd)) == sizeof(cmd)) {
			char reply[1 + sizeof(uint64_t)];
			int r = read(fd, reply, sizeof(reply));
			if (r > 1) {
				tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
					 "tdb_open: %u random bytes from"
					 " /dev/egd-pool\n", r-1);
				/* Copy at least some bytes. */
				memcpy(&ret, reply+1, r - 1);
				if (reply[0] == sizeof(uint64_t)
				    && r == sizeof(reply)) {
					close(fd);
					return ret;
				}
			}
		}
		close(fd);
	}

	/* Fallback: pid and time. */
	gettimeofday(&now, NULL);
	ret = getpid() * 100132289ULL + now.tv_sec * 1000000ULL + now.tv_usec;
	tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
		 "tdb_open: random from getpid and time\n");
	return ret;
}
struct new_db_head {
	struct tdb_header hdr;
	struct free_zone_header zhdr;
	tdb_off_t free[BUCKETS_FOR_ZONE(INITIAL_ZONE_BITS) + 1];
	struct tdb_used_record hrec;
	tdb_off_t hash[1ULL << INITIAL_HASH_BITS];
	struct tdb_free_record frec;
};

struct new_database {
	struct new_db_head h;
	/* Rest up to 1 << INITIAL_ZONE_BITS is empty. */
	char space[(1 << INITIAL_ZONE_BITS)
		   - (sizeof(struct new_db_head) - sizeof(struct tdb_header))];
	uint8_t tailer;
	/* Don't count final padding! */
};
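
/*
 * Sketch (assumption, not from the original file): a compile-time statement
 * of the layout invariant above: everything after the header, up to the
 * tailer, is exactly one initial zone.  BUILD_ASSERT comes from the
 * build_assert.h include; the function name is hypothetical.
 */
#ifdef TDB_DOC_EXAMPLES
static void example_layout_check(void)
{
	BUILD_ASSERT(offsetof(struct new_database, tailer)
		     == sizeof(struct tdb_header) + (1 << INITIAL_ZONE_BITS));
}
#endif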
/* initialise a new database */
static int tdb_new_database(struct tdb_context *tdb)
{
	/* We make it up in memory, then write it out if not internal */
	struct new_database newdb;
	unsigned int bucket, magic_off, dbsize;

	/* Don't want any extra padding! */
	dbsize = offsetof(struct new_database, tailer) + sizeof(newdb.tailer);

	/* Fill in the header */
	newdb.h.hdr.version = TDB_VERSION;
	newdb.h.hdr.hash_seed = random_number(tdb);
	newdb.h.hdr.hash_test = TDB_HASH_MAGIC;
	newdb.h.hdr.hash_test = tdb->khash(&newdb.h.hdr.hash_test,
					   sizeof(newdb.h.hdr.hash_test),
					   newdb.h.hdr.hash_seed,
					   tdb->hash_priv);
	memset(newdb.h.hdr.reserved, 0, sizeof(newdb.h.hdr.reserved));
	newdb.h.hdr.v.generation = 0;
	/* Initial hashes are empty. */
	newdb.h.hdr.v.hash_bits = INITIAL_HASH_BITS;
	newdb.h.hdr.v.hash_off = offsetof(struct new_database, h.hash);
	set_header(tdb, &newdb.h.hrec, 0,
		   sizeof(newdb.h.hash), sizeof(newdb.h.hash), 0,
		   INITIAL_ZONE_BITS);
	memset(newdb.h.hash, 0, sizeof(newdb.h.hash));

	/* Create the single free entry. */
	newdb.h.frec.magic_and_meta = TDB_FREE_MAGIC | INITIAL_ZONE_BITS;
	newdb.h.frec.data_len = (sizeof(newdb.h.frec)
				 - sizeof(struct tdb_used_record)
				 + sizeof(newdb.space));

	/* Free is mostly empty... */
	newdb.h.zhdr.zone_bits = INITIAL_ZONE_BITS;
	memset(newdb.h.free, 0, sizeof(newdb.h.free));

	/* ... except for this one bucket. */
	bucket = size_to_bucket(INITIAL_ZONE_BITS, newdb.h.frec.data_len);
	newdb.h.free[bucket] = offsetof(struct new_database, h.frec);
	newdb.h.frec.next = newdb.h.frec.prev = 0;

	/* Clear free space to keep valgrind happy, and avoid leaking stack. */
	memset(newdb.space, 0, sizeof(newdb.space));

	/* Tailer contains maximum number of free_zone bits. */
	newdb.tailer = INITIAL_ZONE_BITS;

	memset(newdb.h.hdr.magic_food, 0, sizeof(newdb.h.hdr.magic_food));
	strcpy(newdb.h.hdr.magic_food, TDB_MAGIC_FOOD);

	/* This creates an endian-converted database, as if read from disk */
	magic_off = offsetof(struct tdb_header, magic_food);
	tdb_convert(tdb,
		    (char *)&newdb.h.hdr + magic_off,
		    dbsize - 1 - magic_off);

	tdb->header = newdb.h.hdr;

	if (tdb->flags & TDB_INTERNAL) {
		tdb->map_size = dbsize;
		tdb->map_ptr = malloc(tdb->map_size);
		if (!tdb->map_ptr) {
			tdb->ecode = TDB_ERR_OOM;
			return -1;
		}
		memcpy(tdb->map_ptr, &newdb, tdb->map_size);
		return 0;
	}
	if (lseek(tdb->fd, 0, SEEK_SET) == -1)
		return -1;

	if (ftruncate(tdb->fd, 0) == -1)
		return -1;

	if (!tdb_pwrite_all(tdb->fd, &newdb, dbsize, 0)) {
		tdb->ecode = TDB_ERR_IO;
		return -1;
	}
	return 0;
}
struct tdb_context *tdb_open(const char *name, int tdb_flags,
			     int open_flags, mode_t mode,
			     union tdb_attribute *attr)
{
	struct tdb_context *tdb;
	struct stat st;
	int save_errno;
	uint64_t hash_test;
	unsigned v;

	tdb = malloc(sizeof(*tdb));
	if (!tdb) {
		/* Can't log this */
		errno = ENOMEM;
		return NULL;
	}
	tdb->fd = -1;
	tdb->name = NULL;
	tdb->map_ptr = NULL;
	tdb->map_size = sizeof(struct tdb_header);
	tdb->ecode = TDB_SUCCESS;
	/* header will be read in below. */
	tdb->header_uptodate = false;
	tdb->flags = tdb_flags;
	tdb->log = null_log_fn;
	tdb->log_priv = NULL;
	tdb->khash = jenkins_hash;
	tdb->hash_priv = NULL;
	tdb->transaction = NULL;
	/* last_zone will be set below. */

	while (attr) {
		switch (attr->base.attr) {
		case TDB_ATTRIBUTE_LOG:
			tdb->log = attr->log.log_fn;
			tdb->log_priv = attr->log.log_private;
			break;
		case TDB_ATTRIBUTE_HASH:
			tdb->khash = attr->hash.hash_fn;
			tdb->hash_priv = attr->hash.hash_private;
			break;
		default:
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: unknown attribute type %u\n",
				 attr->base.attr);
			errno = EINVAL;
			goto fail;
		}
		attr = attr->base.next;
	}

	if ((open_flags & O_ACCMODE) == O_WRONLY) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: can't open tdb %s write-only\n", name);
		errno = EINVAL;
		goto fail;
	}

	if ((open_flags & O_ACCMODE) == O_RDONLY) {
		tdb->read_only = true;
		/* read only databases don't do locking */
		tdb->flags |= TDB_NOLOCK;
	} else
		tdb->read_only = false;

	/* internal databases don't need any of the rest. */
	if (tdb->flags & TDB_INTERNAL) {
		tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
		if (tdb_new_database(tdb) != 0) {
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: tdb_new_database failed!");
			goto fail;
		}
		TEST_IT(tdb->flags & TDB_CONVERT);
		tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
		return tdb;
	}

	if ((tdb->fd = open(name, open_flags, mode)) == -1) {
		tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
			 "tdb_open: could not open file %s: %s\n",
			 name, strerror(errno));
		goto fail;	/* errno set by open(2) */
	}

	/* on exec, don't inherit the fd */
	v = fcntl(tdb->fd, F_GETFD, 0);
	fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC);

	/* ensure there is only one process initialising at once */
	if (tdb_lock_open(tdb) == -1) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: failed to get open lock on %s: %s\n",
			 name, strerror(errno));
		goto fail;	/* errno set by tdb_brlock */
	}

	if (!tdb_pread_all(tdb->fd, &tdb->header, sizeof(tdb->header), 0)
	    || strcmp(tdb->header.magic_food, TDB_MAGIC_FOOD) != 0) {
		if (!(open_flags & O_CREAT) || tdb_new_database(tdb) == -1) {
			errno = EIO; /* i.e. bad format or something */
			goto fail;
		}
	} else if (tdb->header.version != TDB_VERSION) {
		if (tdb->header.version == bswap_64(TDB_VERSION))
			tdb->flags |= TDB_CONVERT;
		else {
			/* wrong version */
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: %s is unknown version 0x%llx\n",
				 name, (long long)tdb->header.version);
			errno = EIO;
			goto fail;
		}
	}

	tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
	hash_test = TDB_HASH_MAGIC;
	hash_test = tdb->khash(&hash_test, sizeof(hash_test),
			       tdb->header.hash_seed, tdb->hash_priv);
	if (tdb->header.hash_test != hash_test) {
		/* wrong hash variant */
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: %s uses a different hash function\n",
			 name);
		errno = EIO;
		goto fail;
	}

	if (fstat(tdb->fd, &st) == -1)
		goto fail;

	/* Is it already in the open list?  If so, fail. */
	if (tdb_already_open(st.st_dev, st.st_ino)) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: %s (%d,%d) is already open in this process\n",
			 name, (int)st.st_dev, (int)st.st_ino);
		errno = EBUSY;
		goto fail;
	}

	tdb->name = strdup(name);
	if (!tdb->name) {
		errno = ENOMEM;
		goto fail;
	}

	tdb->device = st.st_dev;
	tdb->inode = st.st_ino;
	tdb_unlock_open(tdb);

	/* This makes sure we have current map_size and mmap. */
	tdb->methods->oob(tdb, tdb->map_size + 1, true);

	/* Now we can pick a random free zone to start from. */
	if (tdb_zone_init(tdb) == -1)
		goto fail;

	tdb->next = tdbs;
	tdbs = tdb;
	return tdb;

fail:
	save_errno = errno;

	if (tdb->map_ptr) {
		if (tdb->flags & TDB_INTERNAL) {
			free(tdb->map_ptr);
		} else {
			tdb_munmap(tdb);
		}
	}
	free((char *)tdb->name);
	if (tdb->fd != -1)
		if (close(tdb->fd) != 0)
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: failed to close tdb->fd"
				 " on error!\n");
	free(tdb);
	errno = save_errno;
	return NULL;
}
tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
{
	return tdb->header.v.hash_off
		+ ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
		   * sizeof(tdb_off_t));
}
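
/*
 * Sketch (assumption, not from the original file): hash_off() masks the
 * list number, so probing past the end of the table transparently wraps
 * to the start.  Hypothetical demonstration:
 */
#ifdef TDB_DOC_EXAMPLES
static void example_hash_off_wraps(struct tdb_context *tdb)
{
	uint64_t num = 1ULL << tdb->header.v.hash_bits;

	/* list and list + num always land on the same bucket offset. */
	assert(hash_off(tdb, 7) == hash_off(tdb, 7 + num));
}
#endif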
/* Returns 0 if the entry is a zero (definitely not a match).
 * Returns a valid entry offset if it's a match.  Fills in rec.
 * Otherwise returns TDB_OFF_ERR: keep searching. */
static tdb_off_t entry_matches(struct tdb_context *tdb,
			       tdb_off_t list,
			       uint64_t hash,
			       const struct tdb_data *key,
			       struct tdb_used_record *rec)
{
	tdb_off_t off;
	uint64_t keylen;
	const unsigned char *rkey;

	list &= ((1ULL << tdb->header.v.hash_bits) - 1);

	off = tdb_read_off(tdb, tdb->header.v.hash_off
			   + list * sizeof(tdb_off_t));
	if (off == 0 || off == TDB_OFF_ERR)
		return off;

#if 0 /* FIXME: Check other bits. */
	unsigned int bits, bitmask, hoffextra;
	/* Bottom three bits show how many extra hash bits. */
	bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1;
	bitmask = (1 << bits)-1;
	hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask);
	uint64_t hextra = hash >> tdb->header.v.hash_bits;
	if ((hextra & bitmask) != hoffextra)
		return TDB_OFF_ERR;
#endif

	if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
		return TDB_OFF_ERR;

	/* FIXME: check extra bits in header! */
	keylen = rec_key_length(rec);
	if (keylen != key->dsize)
		return TDB_OFF_ERR;

	rkey = tdb_access_read(tdb, off + sizeof(*rec), keylen, false);
	if (!rkey)
		return TDB_OFF_ERR;
	if (memcmp(rkey, key->dptr, keylen) != 0)
		off = TDB_OFF_ERR;
	tdb_access_release(tdb, rkey);
	return off;
}
/* FIXME: Optimize? */
static void unlock_lists(struct tdb_context *tdb,
			 tdb_off_t list, tdb_len_t num,
			 int ltype)
{
	tdb_off_t i;

	for (i = list; i < list + num; i++)
		tdb_unlock_list(tdb, i, ltype);
}

/* FIXME: Optimize? */
static int lock_lists(struct tdb_context *tdb,
		      tdb_off_t list, tdb_len_t num,
		      int ltype)
{
	tdb_off_t i;

	for (i = list; i < list + num; i++) {
		if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT)
		    == TDB_OFF_ERR) {
			unlock_lists(tdb, list, i - list, ltype);
			return -1;
		}
	}
	return 0;
}
/* We lock hashes up to the next empty offset.  We already hold the
 * lock on the start bucket, but we may need to release and re-grab
 * it.  If we fail, we hold no locks at all! */
static tdb_len_t relock_hash_to_zero(struct tdb_context *tdb,
				     tdb_off_t start, int ltype)
{
	tdb_len_t num, len;

again:
	num = 1ULL << tdb->header.v.hash_bits;
	len = tdb_find_zero_off(tdb, hash_off(tdb, start), num - start);
	if (unlikely(len == num - start)) {
		/* We hit the end of the hash range.  Drop lock: we have
		   to lock start of hash first. */
		tdb_len_t pre_locks;

		tdb_unlock_list(tdb, start, ltype);

		/* Grab something, so header is stable. */
		if (tdb_lock_list(tdb, 0, ltype, TDB_LOCK_WAIT))
			return TDB_OFF_ERR;
		pre_locks = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
		/* We want to lock the zero entry as well. */
		pre_locks++;
		if (lock_lists(tdb, 1, pre_locks - 1, ltype) == -1) {
			tdb_unlock_list(tdb, 0, ltype);
			return TDB_OFF_ERR;
		}

		/* Now lock later ones. */
		if (unlikely(lock_lists(tdb, start, len, ltype) == -1)) {
			unlock_lists(tdb, 0, pre_locks, ltype);
			return TDB_OFF_ERR;
		}
		len += pre_locks;
	} else {
		/* We want to lock the zero entry as well. */
		len++;
		/* But we already have lock on start. */
		if (unlikely(lock_lists(tdb, start+1, len-1, ltype) == -1)) {
			tdb_unlock_list(tdb, start, ltype);
			return TDB_OFF_ERR;
		}
	}

	/* Now, did we lose the race, and it's not zero any more? */
	if (unlikely(tdb_read_off(tdb, hash_off(tdb, start + len - 1)) != 0)) {
		/* Leave the start locked, as expected. */
		unlock_lists(tdb, start + 1, len - 1, ltype);
		goto again;
	}

	return len;
}
/* FIXME: modify, don't rewrite! */
static int update_rec_hdr(struct tdb_context *tdb,
			  tdb_off_t off,
			  tdb_len_t keylen,
			  tdb_len_t datalen,
			  struct tdb_used_record *rec,
			  uint64_t h)
{
	uint64_t dataroom = rec_data_length(rec) + rec_extra_padding(rec);

	if (set_header(tdb, rec, keylen, datalen, keylen + dataroom, h,
		       rec_zone_bits(rec)))
		return -1;

	return tdb_write_convert(tdb, off, rec, sizeof(*rec));
}
static int hash_add(struct tdb_context *tdb,
		    uint64_t hash, tdb_off_t off)
{
	tdb_off_t i, hoff, len, num;

	/* Look for next space. */
	i = (hash & ((1ULL << tdb->header.v.hash_bits) - 1));
	len = (1ULL << tdb->header.v.hash_bits) - i;
	num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);

	if (unlikely(num == len)) {
		/* We wrapped.  Look through start of hash table. */
		i = 0;
		hoff = hash_off(tdb, 0);
		len = (1ULL << tdb->header.v.hash_bits);
		num = tdb_find_zero_off(tdb, hoff, len);
		if (num == len) {
			tdb->ecode = TDB_ERR_CORRUPT;
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "hash_add: full hash table!\n");
			return -1;
		}
	}
	if (tdb_read_off(tdb, hash_off(tdb, i + num)) != 0) {
		tdb->ecode = TDB_ERR_CORRUPT;
		tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
			 "hash_add: overwriting hash table?\n");
		return -1;
	}

	/* FIXME: Encode extra hash bits! */
	return tdb_write_off(tdb, hash_off(tdb, i + num), off);
}
/* If we fail, others will try after us. */
static void enlarge_hash(struct tdb_context *tdb)
{
	tdb_off_t newoff, oldoff, i;
	tdb_len_t hlen;
	uint64_t num = 1ULL << tdb->header.v.hash_bits;
	struct tdb_used_record pad, *r;
	unsigned int records = 0;

	/* FIXME: We should do this without holding locks throughout. */
	if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
		return;

	/* Someone else enlarged for us?  Nothing to do. */
	if ((1ULL << tdb->header.v.hash_bits) != num)
		goto unlock;

	/* Allocate our new array. */
	hlen = num * sizeof(tdb_off_t) * 2;
	newoff = alloc(tdb, 0, hlen, 0, false);
	if (unlikely(newoff == TDB_OFF_ERR))
		goto unlock;
	/* Step over record header! */
	newoff += sizeof(struct tdb_used_record);

	/* Starts all zero. */
	if (zero_out(tdb, newoff, hlen) == -1)
		goto unlock;

	/* Update header now so we can use normal routines. */
	oldoff = tdb->header.v.hash_off;

	tdb->header.v.hash_bits++;
	tdb->header.v.hash_off = newoff;

	/* FIXME: If the space before is empty, we know this is in its ideal
	 * location.  Or steal a bit from the pointer to avoid rehash. */
	for (i = 0; i < num; i++) {
		tdb_off_t off;

		off = tdb_read_off(tdb, oldoff + i * sizeof(tdb_off_t));
		if (unlikely(off == TDB_OFF_ERR))
			goto oldheader;
		if (off && hash_add(tdb, hash_record(tdb, off), off) == -1)
			goto oldheader;
		if (off)
			records++;
	}

	tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
		 "enlarge_hash: moved %u records from %llu buckets.\n",
		 records, (long long)num);

	/* Free up old hash. */
	r = tdb_get(tdb, oldoff - sizeof(*r), &pad, sizeof(*r));
	if (!r)
		goto oldheader;
	add_free_record(tdb, rec_zone_bits(r), oldoff - sizeof(*r),
			sizeof(*r)+rec_data_length(r)+rec_extra_padding(r));

	/* Now we write the modified header. */
	write_header(tdb);
unlock:
	tdb_allrecord_unlock(tdb, F_WRLCK);
	return;

oldheader:
	tdb->header.v.hash_bits--;
	tdb->header.v.hash_off = oldoff;
	goto unlock;
}
/* This is the slow version of the routine which searches the
 * hashtable for an entry.
 * We lock every hash bucket up to and including the next zero one.
 */
static tdb_off_t find_and_lock_slow(struct tdb_context *tdb,
				    struct tdb_data key,
				    uint64_t h,
				    int ltype,
				    tdb_off_t *start_lock,
				    tdb_len_t *num_locks,
				    tdb_off_t *bucket,
				    struct tdb_used_record *rec)
{
	/* Warning: this may drop the lock on *bucket! */
	*num_locks = relock_hash_to_zero(tdb, *start_lock, ltype);
	if (*num_locks == TDB_OFF_ERR)
		return TDB_OFF_ERR;

	for (*bucket = *start_lock;
	     *bucket < *start_lock + *num_locks;
	     (*bucket)++) {
		tdb_off_t off = entry_matches(tdb, *bucket, h, &key, rec);
		/* Empty entry or we found it? */
		if (off == 0 || off != TDB_OFF_ERR)
			return off;
	}

	/* We didn't find a zero entry?  Something went badly wrong... */
	unlock_lists(tdb, *start_lock, *num_locks, ltype);
	tdb->ecode = TDB_ERR_CORRUPT;
	tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
		 "find_and_lock: expected to find an empty hash bucket!\n");
	return TDB_OFF_ERR;
}
/* This is the core routine which searches the hashtable for an entry.
 * On error, no locks are held and TDB_OFF_ERR is returned.
 * Otherwise, *num_locks locks of type ltype from *start_lock are held.
 * The bucket where the entry is (or would be) is in *bucket.
 * If not found, the return value is 0.
 * If found, the return value is the offset, and *rec is the record. */
static tdb_off_t find_and_lock(struct tdb_context *tdb,
			       struct tdb_data key,
			       uint64_t h,
			       int ltype,
			       tdb_off_t *start_lock,
			       tdb_len_t *num_locks,
			       tdb_off_t *bucket,
			       struct tdb_used_record *rec)
{
	tdb_off_t off;

	/* FIXME: can we avoid locks for some fast paths? */
	*start_lock = tdb_lock_list(tdb, h, ltype, TDB_LOCK_WAIT);
	if (*start_lock == TDB_OFF_ERR)
		return TDB_OFF_ERR;

	*num_locks = 1;
	off = entry_matches(tdb, *start_lock, h, &key, rec);
	if (likely(off != TDB_OFF_ERR)) {
		*bucket = *start_lock;
		return off;
	}

	/* Slow path, need to grab more locks and search. */
	return find_and_lock_slow(tdb, key, h, ltype, start_lock, num_locks,
				  bucket, rec);
}
/* Returns -1 on error, 0 on OK. */
static int replace_data(struct tdb_context *tdb,
			uint64_t h, struct tdb_data key, struct tdb_data dbuf,
			tdb_off_t bucket,
			tdb_off_t old_off, tdb_len_t old_room,
			unsigned old_zone, bool growing)
{
	tdb_off_t new_off;

	/* Allocate a new record. */
	new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
	if (unlikely(new_off == TDB_OFF_ERR))
		return -1;

	/* We didn't like the existing one: remove it. */
	if (old_off)
		add_free_record(tdb, old_zone, old_off,
				sizeof(struct tdb_used_record)
				+ key.dsize + old_room);

	/* FIXME: Encode extra hash bits! */
	if (tdb_write_off(tdb, hash_off(tdb, bucket), new_off) == -1)
		return -1;

	new_off += sizeof(struct tdb_used_record);
	if (tdb->methods->write(tdb, new_off, key.dptr, key.dsize) == -1)
		return -1;

	new_off += key.dsize;
	if (tdb->methods->write(tdb, new_off, dbuf.dptr, dbuf.dsize) == -1)
		return -1;

	/* FIXME: tdb_increment_seqnum(tdb); */
	return 0;
}
int tdb_store(struct tdb_context *tdb,
	      struct tdb_data key, struct tdb_data dbuf, int flag)
{
	tdb_off_t off, bucket, start, num;
	tdb_len_t old_room = 0;
	struct tdb_used_record rec;
	uint64_t h;
	int ret;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
	if (unlikely(off == TDB_OFF_ERR))
		return -1;

	/* Now we have lock on this hash bucket. */
	if (flag == TDB_INSERT) {
		if (off) {
			tdb->ecode = TDB_ERR_EXISTS;
			goto fail;
		}
	} else {
		if (off) {
			old_room = rec_data_length(&rec)
				+ rec_extra_padding(&rec);
			if (old_room >= dbuf.dsize) {
				/* Can modify in-place.  Easy! */
				if (update_rec_hdr(tdb, off,
						   key.dsize, dbuf.dsize,
						   &rec, h))
					goto fail;
				if (tdb->methods->write(tdb, off + sizeof(rec)
							+ key.dsize,
							dbuf.dptr, dbuf.dsize))
					goto fail;
				unlock_lists(tdb, start, num, F_WRLCK);
				return 0;
			}
			/* FIXME: See if right record is free? */
		} else {
			if (flag == TDB_MODIFY) {
				/* if the record doesn't exist and we
				   are in TDB_MODIFY mode then we should fail
				   the store */
				tdb->ecode = TDB_ERR_NOEXIST;
				goto fail;
			}
		}
	}

	/* If we didn't use the old record, this implies we're growing. */
	ret = replace_data(tdb, h, key, dbuf, bucket, off, old_room,
			   rec_zone_bits(&rec), off != 0);
	unlock_lists(tdb, start, num, F_WRLCK);

	/* FIXME: by simple simulation, this approximated 60% full.
	 * Check in real case! */
	if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
		enlarge_hash(tdb);

	return ret;

fail:
	unlock_lists(tdb, start, num, F_WRLCK);
	return -1;
}
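
/*
 * Usage sketch (not from the original file): how the store flags behave,
 * as implemented above.  TDB_INSERT fails with TDB_ERR_EXISTS on a present
 * key; TDB_MODIFY fails with TDB_ERR_NOEXIST on an absent one.  The
 * function name is hypothetical.
 */
#ifdef TDB_DOC_EXAMPLES
static int example_store_modes(struct tdb_context *tdb,
			       struct tdb_data key, struct tdb_data val)
{
	/* Try to create; if it already exists, overwrite it instead. */
	if (tdb_store(tdb, key, val, TDB_INSERT) == 0)
		return 0;
	if (tdb_error(tdb) == TDB_ERR_EXISTS)
		return tdb_store(tdb, key, val, TDB_MODIFY);
	return -1;
}
#endif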
int tdb_append(struct tdb_context *tdb,
	       struct tdb_data key, struct tdb_data dbuf)
{
	tdb_off_t off, bucket, start, num;
	struct tdb_used_record rec;
	tdb_len_t old_room = 0, old_dlen;
	uint64_t h;
	unsigned char *newdata;
	struct tdb_data new_dbuf;
	int ret;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
	if (unlikely(off == TDB_OFF_ERR))
		return -1;

	if (off) {
		old_dlen = rec_data_length(&rec);
		old_room = old_dlen + rec_extra_padding(&rec);

		/* Fast path: can append in place. */
		if (rec_extra_padding(&rec) >= dbuf.dsize) {
			if (update_rec_hdr(tdb, off, key.dsize,
					   old_dlen + dbuf.dsize, &rec, h))
				goto fail;

			off += sizeof(rec) + key.dsize + old_dlen;
			if (tdb->methods->write(tdb, off, dbuf.dptr,
						dbuf.dsize) == -1)
				goto fail;

			/* FIXME: tdb_increment_seqnum(tdb); */
			unlock_lists(tdb, start, num, F_WRLCK);
			return 0;
		}
		/* FIXME: Check right record free? */

		newdata = malloc(key.dsize + old_dlen + dbuf.dsize);
		if (!newdata) {
			tdb->ecode = TDB_ERR_OOM;
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "tdb_append: cannot allocate %llu bytes!\n",
				 (long long)key.dsize + old_dlen + dbuf.dsize);
			goto fail;
		}
		if (tdb->methods->read(tdb, off + sizeof(rec) + key.dsize,
				       newdata, old_dlen) != 0) {
			free(newdata);
			goto fail;
		}
		memcpy(newdata + old_dlen, dbuf.dptr, dbuf.dsize);
		new_dbuf.dptr = newdata;
		new_dbuf.dsize = old_dlen + dbuf.dsize;
	} else {
		newdata = NULL;
		new_dbuf = dbuf;
	}

	/* If they're using tdb_append(), it implies they're growing the
	 * record. */
	ret = replace_data(tdb, h, key, new_dbuf, bucket, off, old_room,
			   rec_zone_bits(&rec), true);
	unlock_lists(tdb, start, num, F_WRLCK);
	free(newdata);

	/* FIXME: by simple simulation, this approximated 60% full.
	 * Check in real case! */
	if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
		enlarge_hash(tdb);

	return ret;

fail:
	unlock_lists(tdb, start, num, F_WRLCK);
	return -1;
}
struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
{
	tdb_off_t off, start, num, bucket;
	struct tdb_used_record rec;
	uint64_t h;
	struct tdb_data ret;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	off = find_and_lock(tdb, key, h, F_RDLCK, &start, &num, &bucket, &rec);
	if (unlikely(off == TDB_OFF_ERR))
		return tdb_null;

	if (!off) {
		tdb->ecode = TDB_ERR_NOEXIST;
		ret = tdb_null;
	} else {
		ret.dsize = rec_data_length(&rec);
		ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
					  ret.dsize);
	}

	unlock_lists(tdb, start, num, F_RDLCK);
	return ret;
}
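
/*
 * Usage sketch (assumption, not from the original file): tdb_fetch()
 * returns memory from tdb_alloc_read(), so the caller owns ret.dptr and
 * must free() it; a NULL dptr means "not found" (or an error), with
 * tdb->ecode set accordingly.  The function name is hypothetical.
 */
#ifdef TDB_DOC_EXAMPLES
static void example_fetch(struct tdb_context *tdb, struct tdb_data key)
{
	struct tdb_data val = tdb_fetch(tdb, key);

	if (val.dptr != NULL) {
		/* ... use val.dptr[0 .. val.dsize-1] ... */
		free(val.dptr);
	}
}
#endif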
int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
{
	tdb_off_t i, bucket, off, start, num;
	struct tdb_used_record rec;
	uint64_t h;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
	if (unlikely(start == TDB_OFF_ERR))
		return -1;

	/* FIXME: Fastpath: if next is zero, we can delete without lock,
	 * since this lock protects us. */
	off = find_and_lock_slow(tdb, key, h, F_WRLCK,
				 &start, &num, &bucket, &rec);
	if (unlikely(off == TDB_OFF_ERR))
		return -1;

	if (!off) {
		/* FIXME: We could optimize not found case if it mattered, by
		 * reading offset after first lock: if it's zero, goto here. */
		unlock_lists(tdb, start, num, F_WRLCK);
		tdb->ecode = TDB_ERR_NOEXIST;
		return -1;
	}
	/* Since we found the entry, we must have locked it and a zero. */

	/* This actually unlinks it. */
	if (tdb_write_off(tdb, hash_off(tdb, bucket), 0) == -1)
		goto unlock_err;

	/* Rehash anything following. */
	for (i = bucket+1; i != bucket + num - 1; i++) {
		tdb_off_t hoff, off2;
		uint64_t h2;

		hoff = hash_off(tdb, i);
		off2 = tdb_read_off(tdb, hoff);
		if (unlikely(off2 == TDB_OFF_ERR))
			goto unlock_err;

		/* This can happen if we raced. */
		if (unlikely(off2 == 0))
			break;

		/* Maybe use a bit to indicate it is in ideal place? */
		h2 = hash_record(tdb, off2);
		/* Is it happy where it is? */
		if (hash_off(tdb, h2) == hoff)
			continue;

		if (tdb_write_off(tdb, hoff, 0) == -1)
			goto unlock_err;

		if (hash_add(tdb, h2, off2) == -1)
			goto unlock_err;
	}

	/* Free the deleted entry. */
	if (add_free_record(tdb, rec_zone_bits(&rec), off,
			    sizeof(struct tdb_used_record)
			    + rec_key_length(&rec)
			    + rec_data_length(&rec)
			    + rec_extra_padding(&rec)) != 0)
		goto unlock_err;

	unlock_lists(tdb, start, num, F_WRLCK);
	return 0;

unlock_err:
	unlock_lists(tdb, start, num, F_WRLCK);
	return -1;
}
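
/*
 * Sketch (not from the original file): the deletion invariant above on a
 * toy open-addressed table, independent of tdb's on-disk format.  After
 * clearing a slot, every entry up to the next hole must be re-inserted,
 * or later probes would stop at the new hole and miss them.  Assumes
 * nonzero values and a table that is never full; all names hypothetical.
 */
#ifdef TDB_DOC_EXAMPLES
#define TOY_SIZE 8
static unsigned int toy[TOY_SIZE];	/* 0 means empty */

static unsigned int toy_bucket(unsigned int v)
{
	return v % TOY_SIZE;
}

static void toy_add(unsigned int v)
{
	unsigned int j = toy_bucket(v);

	while (toy[j] != 0)
		j = (j + 1) % TOY_SIZE;
	toy[j] = v;
}

static void toy_delete_at(unsigned int i)
{
	unsigned int j;

	toy[i] = 0;
	/* Rehash the rest of the probe run, as tdb_delete() does above. */
	for (j = (i + 1) % TOY_SIZE; toy[j] != 0; j = (j + 1) % TOY_SIZE) {
		unsigned int v = toy[j];

		if (toy_bucket(v) == j)
			continue;	/* happy where it is */
		toy[j] = 0;		/* unlink, then re-insert */
		toy_add(v);
	}
}
#endif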
int tdb_close(struct tdb_context *tdb)
{
	struct tdb_context **i;
	int ret = 0;

	if (tdb->transaction) {
		tdb_transaction_cancel(tdb);
	}
	tdb_trace(tdb, "tdb_close");

	if (tdb->flags & TDB_INTERNAL)
		free(tdb->map_ptr);
	else
		tdb_munmap(tdb);
	free((char *)tdb->name);
	if (tdb->fd != -1) {
		ret = close(tdb->fd);
	}
	free(tdb->lockrecs);

	/* Remove from contexts list */
	for (i = &tdbs; *i; i = &(*i)->next) {
		if (*i == tdb) {
			*i = tdb->next;
			break;
		}
	}
	close(tdb->tracefd);
	free(tdb);
	return ret;
}

enum TDB_ERROR tdb_error(struct tdb_context *tdb)
{
	return tdb->ecode;
}