/* ccan/tdb2/tdb.c */
#include "private.h"
#include <ccan/tdb2/tdb2.h>
#include <ccan/hash/hash.h>
#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>
#include <assert.h>

/* The null return. */
struct tdb_data tdb_null = { .dptr = NULL, .dsize = 0 };

/* all contexts, to ensure no double-opens (fcntl locks don't nest!) */
static struct tdb_context *tdbs = NULL;

PRINTF_ATTRIBUTE(4, 5) static void
null_log_fn(struct tdb_context *tdb,
            enum tdb_debug_level level, void *priv,
            const char *fmt, ...)
{
}

/* We do a lot of work assuming our copy of the header's volatile area
 * is up to date, and usually it is.  However, once we grab a lock, we have to
 * re-check it. */
bool header_changed(struct tdb_context *tdb)
{
        uint64_t gen;

        if (!(tdb->flags & TDB_NOLOCK) && tdb->header_uptodate) {
                tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
                         "warning: header uptodate already\n");
        }

        /* We could get a partial update if we're not holding any locks. */
        assert((tdb->flags & TDB_NOLOCK) || tdb_has_locks(tdb));

        tdb->header_uptodate = true;
        gen = tdb_read_off(tdb, offsetof(struct tdb_header, v.generation));
        if (unlikely(gen != tdb->header.v.generation)) {
                tdb_read_convert(tdb, offsetof(struct tdb_header, v),
                                 &tdb->header.v, sizeof(tdb->header.v));
                return true;
        }
        return false;
}
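
/* Sketch added for illustration (not in the original): the intended
 * calling pattern for the generation counter, modelled on enlarge_hash()
 * below.  Take a lock, re-validate the cached volatile header, mutate it,
 * then publish with write_header(), which bumps v.generation. */
#if 0
static int example_update_volatile_header(struct tdb_context *tdb)
{
        if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
                return -1;

        /* Our cached tdb->header.v may be stale until we re-check. */
        if (header_changed(tdb)) {
                /* Cache was refreshed from disk; re-derive anything
                 * computed from the old values here. */
        }

        /* ... mutate tdb->header.v here ... */

        write_header(tdb);      /* increments v.generation on disk */
        tdb_allrecord_unlock(tdb, F_WRLCK);
        return 0;
}
#endif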

int write_header(struct tdb_context *tdb)
{
        assert(tdb_read_off(tdb, offsetof(struct tdb_header, v.generation))
               == tdb->header.v.generation);
        tdb->header.v.generation++;
        return tdb_write_convert(tdb, offsetof(struct tdb_header, v),
                                 &tdb->header.v, sizeof(tdb->header.v));
}

static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
                             void *arg)
{
        return hash64_stable((const unsigned char *)key, length, seed);
}

uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
{
        return tdb->khash(ptr, len, tdb->header.hash_seed, tdb->hash_priv);
}
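
/* Sketch added for illustration (not in the original): a replacement hash
 * must match the khash signature used by jenkins_hash() above; it is wired
 * in via TDB_ATTRIBUTE_HASH (see tdb_open below).  The function name is
 * hypothetical. */
#if 0
static uint64_t example_djb2_hash(const void *key, size_t length,
                                  uint64_t seed, void *arg)
{
        const unsigned char *p = key;
        uint64_t h = seed ^ 5381;

        /* Classic djb2 step: h = h * 33 + byte. */
        while (length--)
                h = h * 33 + *p++;
        return h;
}
#endif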

static bool tdb_already_open(dev_t device, ino_t ino)
{
        struct tdb_context *i;

        for (i = tdbs; i; i = i->next) {
                if (i->device == device && i->inode == ino) {
                        return true;
                }
        }

        return false;
}

static uint64_t random_number(struct tdb_context *tdb)
{
        int fd;
        uint64_t ret = 0;
        struct timeval now;

        fd = open("/dev/urandom", O_RDONLY);
        if (fd >= 0) {
                if (tdb_read_all(fd, &ret, sizeof(ret))) {
                        tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
                                 "tdb_open: random from /dev/urandom\n");
                        close(fd);
                        return ret;
                }
                close(fd);
        }
        /* FIXME: Untested!  Based on Wikipedia protocol description! */
        fd = open("/dev/egd-pool", O_RDWR);
        if (fd >= 0) {
                /* Command is 1, next byte is size we want to read. */
                char cmd[2] = { 1, sizeof(uint64_t) };
                if (write(fd, cmd, sizeof(cmd)) == sizeof(cmd)) {
                        char reply[1 + sizeof(uint64_t)];
                        int r = read(fd, reply, sizeof(reply));
                        if (r > 1) {
                                tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
                                         "tdb_open: %u random bytes from"
                                         " /dev/egd-pool\n", r-1);
                                /* Copy at least some bytes. */
                                memcpy(&ret, reply+1, r - 1);
                                if (reply[0] == sizeof(uint64_t)
                                    && r == sizeof(reply)) {
                                        close(fd);
                                        return ret;
                                }
                        }
                }
                close(fd);
        }

        /* Fallback: pid and time. */
        gettimeofday(&now, NULL);
        ret = getpid() * 100132289ULL + now.tv_sec * 1000000ULL + now.tv_usec;
        tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
                 "tdb_open: random from getpid and time\n");
        return ret;
}

struct new_database {
        struct tdb_header hdr;
        struct tdb_used_record hrec;
        tdb_off_t hash[1ULL << INITIAL_HASH_BITS];
        struct tdb_used_record frec;
        tdb_off_t free[INITIAL_FREE_BUCKETS + 1]; /* One overflow bucket */
};
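
/* Comment added for orientation: a freshly created database is laid out
 * exactly like this struct:
 *
 *      +-----------------------------+
 *      | struct tdb_header           |  magic, version, seed, volatile v
 *      +-----------------------------+
 *      | hrec + hash[]               |  initial hash table, all zero
 *      +-----------------------------+
 *      | frec + free[]               |  one zone of free buckets, all zero
 *      +-----------------------------+
 *
 * tdb_new_database() below fills this in memory, then writes it out. */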

/* initialise a new database */
static int tdb_new_database(struct tdb_context *tdb)
{
        /* We make it up in memory, then write it out if not internal */
        struct new_database newdb;
        unsigned int magic_off = offsetof(struct tdb_header, magic_food);

        /* Fill in the header */
        newdb.hdr.version = TDB_VERSION;
        newdb.hdr.hash_seed = random_number(tdb);
        newdb.hdr.hash_test = TDB_HASH_MAGIC;
        newdb.hdr.hash_test = tdb->khash(&newdb.hdr.hash_test,
                                         sizeof(newdb.hdr.hash_test),
                                         newdb.hdr.hash_seed,
                                         tdb->hash_priv);

        newdb.hdr.v.generation = 0;

        /* The initial zone must cover the initial database size! */
        BUILD_ASSERT((1ULL << INITIAL_ZONE_BITS) >= sizeof(newdb));

        /* Free array has 1 zone, 10 buckets.  All buckets empty. */
        newdb.hdr.v.num_zones = 1;
        newdb.hdr.v.zone_bits = INITIAL_ZONE_BITS;
        newdb.hdr.v.free_buckets = INITIAL_FREE_BUCKETS;
        newdb.hdr.v.free_off = offsetof(struct new_database, free);
        set_header(tdb, &newdb.frec, 0,
                   sizeof(newdb.free), sizeof(newdb.free), 0);
        memset(newdb.free, 0, sizeof(newdb.free));

        /* Initial hashes are empty. */
        newdb.hdr.v.hash_bits = INITIAL_HASH_BITS;
        newdb.hdr.v.hash_off = offsetof(struct new_database, hash);
        set_header(tdb, &newdb.hrec, 0,
                   sizeof(newdb.hash), sizeof(newdb.hash), 0);
        memset(newdb.hash, 0, sizeof(newdb.hash));

        /* Magic food */
        memset(newdb.hdr.magic_food, 0, sizeof(newdb.hdr.magic_food));
        strcpy(newdb.hdr.magic_food, TDB_MAGIC_FOOD);

        /* This creates an endian-converted database, as if read from disk */
        tdb_convert(tdb,
                    (char *)&newdb.hdr + magic_off,
                    sizeof(newdb) - magic_off);

        tdb->header = newdb.hdr;

        if (tdb->flags & TDB_INTERNAL) {
                tdb->map_size = sizeof(newdb);
                tdb->map_ptr = malloc(tdb->map_size);
                if (!tdb->map_ptr) {
                        tdb->ecode = TDB_ERR_OOM;
                        return -1;
                }
                memcpy(tdb->map_ptr, &newdb, tdb->map_size);
                return 0;
        }
        if (lseek(tdb->fd, 0, SEEK_SET) == -1)
                return -1;

        if (ftruncate(tdb->fd, 0) == -1)
                return -1;

        if (!tdb_pwrite_all(tdb->fd, &newdb, sizeof(newdb), 0)) {
                tdb->ecode = TDB_ERR_IO;
                return -1;
        }
        return 0;
}

struct tdb_context *tdb_open(const char *name, int tdb_flags,
                             int open_flags, mode_t mode,
                             union tdb_attribute *attr)
{
        struct tdb_context *tdb;
        struct stat st;
        int save_errno;
        uint64_t hash_test;
        unsigned v;

        tdb = malloc(sizeof(*tdb));
        if (!tdb) {
                /* Can't log this */
                errno = ENOMEM;
                goto fail;
        }
        tdb->name = NULL;
        tdb->map_ptr = NULL;
        tdb->fd = -1;
        /* map_size will be set below. */
        tdb->ecode = TDB_SUCCESS;
        /* header will be read in below. */
        tdb->header_uptodate = false;
        tdb->flags = tdb_flags;
        tdb->log = null_log_fn;
        tdb->log_priv = NULL;
        tdb->khash = jenkins_hash;
        tdb->hash_priv = NULL;
        tdb->transaction = NULL;
        /* last_zone will be set below. */
        tdb_io_init(tdb);
        tdb_lock_init(tdb);

        while (attr) {
                switch (attr->base.attr) {
                case TDB_ATTRIBUTE_LOG:
                        tdb->log = attr->log.log_fn;
                        tdb->log_priv = attr->log.log_private;
                        break;
                case TDB_ATTRIBUTE_HASH:
                        tdb->khash = attr->hash.hash_fn;
                        tdb->hash_priv = attr->hash.hash_private;
                        break;
                default:
                        tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                                 "tdb_open: unknown attribute type %u\n",
                                 attr->base.attr);
                        errno = EINVAL;
                        goto fail;
                }
                attr = attr->base.next;
        }

        if ((open_flags & O_ACCMODE) == O_WRONLY) {
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_open: can't open tdb %s write-only\n", name);
                errno = EINVAL;
                goto fail;
        }

        if ((open_flags & O_ACCMODE) == O_RDONLY) {
                tdb->read_only = true;
                /* read only databases don't do locking */
                tdb->flags |= TDB_NOLOCK;
        } else
                tdb->read_only = false;

        /* internal databases don't need any of the rest. */
        if (tdb->flags & TDB_INTERNAL) {
                tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
                if (tdb_new_database(tdb) != 0) {
                        tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                                 "tdb_open: tdb_new_database failed!");
                        goto fail;
                }
                TEST_IT(tdb->flags & TDB_CONVERT);
                tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
                /* Zones don't matter for internal db. */
                tdb->last_zone = 0;
                return tdb;
        }

        if ((tdb->fd = open(name, open_flags, mode)) == -1) {
                tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
                         "tdb_open: could not open file %s: %s\n",
                         name, strerror(errno));
                goto fail;      /* errno set by open(2) */
        }

        /* on exec, don't inherit the fd */
        v = fcntl(tdb->fd, F_GETFD, 0);
        fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC);

        /* ensure there is only one process initialising at once */
        if (tdb_lock_open(tdb) == -1) {
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_open: failed to get open lock on %s: %s\n",
                         name, strerror(errno));
                goto fail;      /* errno set by tdb_brlock */
        }

        if (!tdb_pread_all(tdb->fd, &tdb->header, sizeof(tdb->header), 0)
            || strcmp(tdb->header.magic_food, TDB_MAGIC_FOOD) != 0) {
                if (!(open_flags & O_CREAT) || tdb_new_database(tdb) == -1) {
                        if (errno == 0) {
                                errno = EIO; /* ie bad format or something */
                        }
                        goto fail;
                }
        } else if (tdb->header.version != TDB_VERSION) {
                if (tdb->header.version == bswap_64(TDB_VERSION))
                        tdb->flags |= TDB_CONVERT;
                else {
                        /* wrong version */
                        tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                                 "tdb_open: %s is unknown version 0x%llx\n",
                                 name, (long long)tdb->header.version);
                        errno = EIO;
                        goto fail;
                }
        }

        tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
        hash_test = TDB_HASH_MAGIC;
        hash_test = tdb->khash(&hash_test, sizeof(hash_test),
                               tdb->header.hash_seed, tdb->hash_priv);
        if (tdb->header.hash_test != hash_test) {
                /* wrong hash variant */
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_open: %s uses a different hash function\n",
                         name);
                errno = EIO;
                goto fail;
        }

        if (fstat(tdb->fd, &st) == -1)
                goto fail;

        /* Is it already in the open list?  If so, fail. */
        if (tdb_already_open(st.st_dev, st.st_ino)) {
                /* FIXME */
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_open: %s (%d,%d) is already open in this process\n",
                         name, (int)st.st_dev, (int)st.st_ino);
                errno = EBUSY;
                goto fail;
        }

        tdb->name = strdup(name);
        if (!tdb->name) {
                errno = ENOMEM;
                goto fail;
        }

        tdb->map_size = st.st_size;
        tdb->device = st.st_dev;
        tdb->inode = st.st_ino;
        tdb_mmap(tdb);
        tdb_unlock_open(tdb);
        tdb_zone_init(tdb);

        tdb->next = tdbs;
        tdbs = tdb;
        return tdb;

 fail:
        save_errno = errno;

        if (!tdb)
                return NULL;

#ifdef TDB_TRACE
        close(tdb->tracefd);
#endif
        if (tdb->map_ptr) {
                if (tdb->flags & TDB_INTERNAL) {
                        free(tdb->map_ptr);
                } else
                        tdb_munmap(tdb);
        }
        free((char *)tdb->name);
        if (tdb->fd != -1)
                if (close(tdb->fd) != 0)
                        tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                                 "tdb_open: failed to close tdb->fd"
                                 " on error!\n");
        free(tdb);
        errno = save_errno;
        return NULL;
}
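
/* Illustrative usage sketch (not part of the original file).  The log
 * callback signature matches null_log_fn above, and the attribute wiring
 * matches the TDB_ATTRIBUTE_LOG case in tdb_open.  The file name and
 * function names are hypothetical. */
#if 0
#include <stdarg.h>
#include <stdio.h>

static void example_log(struct tdb_context *tdb,
                        enum tdb_debug_level level, void *priv,
                        const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
}

static struct tdb_context *example_open(void)
{
        union tdb_attribute log_attr;

        log_attr.base.attr = TDB_ATTRIBUTE_LOG;
        log_attr.base.next = NULL;
        log_attr.log.log_fn = example_log;
        log_attr.log.log_private = NULL;

        /* Creates example.tdb if it doesn't exist. */
        return tdb_open("example.tdb", 0, O_RDWR|O_CREAT, 0600, &log_attr);
}
#endif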

tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
{
        return tdb->header.v.hash_off
                + ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
                   * sizeof(tdb_off_t));
}
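
/* Worked example (comment added): with hash_bits = 10 there are 1024
 * buckets; list 1027 masks to bucket 3, so this returns
 * hash_off + 3 * sizeof(tdb_off_t).  The mask means callers can pass a
 * raw hash value or an already-wrapped bucket number interchangeably. */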

/* Returns 0 if the entry is a zero (definitely not a match).
 * Returns a valid entry offset if it's a match.  Fills in rec.
 * Otherwise returns TDB_OFF_ERR: keep searching. */
static tdb_off_t entry_matches(struct tdb_context *tdb,
                               uint64_t list,
                               uint64_t hash,
                               const struct tdb_data *key,
                               struct tdb_used_record *rec)
{
        tdb_off_t off;
        uint64_t keylen;
        const unsigned char *rkey;

        list &= ((1ULL << tdb->header.v.hash_bits) - 1);

        off = tdb_read_off(tdb, tdb->header.v.hash_off
                           + list * sizeof(tdb_off_t));
        if (off == 0 || off == TDB_OFF_ERR)
                return off;

#if 0 /* FIXME: Check other bits. */
        unsigned int bits, bitmask, hoffextra;
        /* Bottom three bits show how many extra hash bits. */
        bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1;
        bitmask = (1 << bits)-1;
        hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask);
        uint64_t hextra = hash >> tdb->header.v.hash_bits;
        if ((hextra & bitmask) != hoffextra)
                return TDB_OFF_ERR;
        off &= ~...;
#endif

        if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
                return TDB_OFF_ERR;

        /* FIXME: check extra bits in header! */
        keylen = rec_key_length(rec);
        if (keylen != key->dsize)
                return TDB_OFF_ERR;

        rkey = tdb_access_read(tdb, off + sizeof(*rec), keylen, false);
        if (!rkey)
                return TDB_OFF_ERR;
        if (memcmp(rkey, key->dptr, keylen) != 0)
                off = TDB_OFF_ERR;
        tdb_access_release(tdb, rkey);
        return off;
}

/* FIXME: Optimize? */
static void unlock_lists(struct tdb_context *tdb,
                         tdb_off_t list, tdb_len_t num,
                         int ltype)
{
        tdb_off_t i;

        for (i = list; i < list + num; i++)
                tdb_unlock_list(tdb, i, ltype);
}

/* FIXME: Optimize? */
static int lock_lists(struct tdb_context *tdb,
                      tdb_off_t list, tdb_len_t num,
                      int ltype)
{
        tdb_off_t i;

        for (i = list; i < list + num; i++) {
                if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT)
                    == TDB_OFF_ERR) {
                        unlock_lists(tdb, list, i - list, ltype);
                        return -1;
                }
        }
        return 0;
}

/* We lock hashes up to the next empty offset.  We already hold the
 * lock on the start bucket, but we may need to release and re-grab
 * it.  If we fail, we hold no locks at all! */
static tdb_len_t relock_hash_to_zero(struct tdb_context *tdb,
                                     tdb_off_t start, int ltype)
{
        tdb_len_t num, len;

again:
        num = 1ULL << tdb->header.v.hash_bits;
        len = tdb_find_zero_off(tdb, hash_off(tdb, start), num - start);
        if (unlikely(len == num - start)) {
                /* We hit the end of the hash range.  Drop lock: we have
                   to lock start of hash first. */
                tdb_len_t pre_locks;

                tdb_unlock_list(tdb, start, ltype);

                /* Grab something, so header is stable. */
                if (tdb_lock_list(tdb, 0, ltype, TDB_LOCK_WAIT))
                        return TDB_OFF_ERR;
                pre_locks = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
                /* We want to lock the zero entry as well. */
                pre_locks++;
                if (lock_lists(tdb, 1, pre_locks - 1, ltype) == -1) {
                        tdb_unlock_list(tdb, 0, ltype);
                        return TDB_OFF_ERR;
                }

                /* Now lock later ones. */
                if (unlikely(lock_lists(tdb, start, len, ltype) == -1)) {
                        unlock_lists(tdb, 0, pre_locks, ltype);
                        return TDB_OFF_ERR;
                }
                len += pre_locks;
        } else {
                /* We want to lock the zero entry as well. */
                len++;
                /* But we already have lock on start. */
                if (unlikely(lock_lists(tdb, start+1, len-1, ltype) == -1)) {
                        tdb_unlock_list(tdb, start, ltype);
                        return TDB_OFF_ERR;
                }
        }

        /* Now, did we lose the race, and it's not zero any more? */
        if (unlikely(tdb_read_off(tdb, hash_off(tdb, start + len - 1)) != 0)) {
                /* Leave the start locked, as expected. */
                unlock_lists(tdb, start + 1, len - 1, ltype);
                goto again;
        }

        return len;
}
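
/* Comment added for orientation: in the wrap-around branch above, the
 * locked region ends up covering two stretches of the table:
 *
 *      [0 .. pre_locks-1]   wrapped tail of the chain, plus its zero
 *      [start .. num-1]     from the original bucket to the table end
 *
 * The returned len counts both, so callers can release everything with a
 * single unlock_lists(tdb, start, len, ltype); bucket numbers past the
 * table end appear to wrap back to 0 (note how find_and_lock() below
 * passes the raw hash h straight to tdb_lock_list()). */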

/* FIXME: modify, don't rewrite! */
static int update_rec_hdr(struct tdb_context *tdb,
                          tdb_off_t off,
                          tdb_len_t keylen,
                          tdb_len_t datalen,
                          struct tdb_used_record *rec,
                          uint64_t h)
{
        uint64_t dataroom = rec_data_length(rec) + rec_extra_padding(rec);

        if (set_header(tdb, rec, keylen, datalen, keylen + dataroom, h))
                return -1;

        return tdb_write_convert(tdb, off, rec, sizeof(*rec));
}

static int hash_add(struct tdb_context *tdb,
                    uint64_t hash, tdb_off_t off)
{
        tdb_off_t i, hoff, len, num;

        /* Look for next space. */
        i = (hash & ((1ULL << tdb->header.v.hash_bits) - 1));
        len = (1ULL << tdb->header.v.hash_bits) - i;
        num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);

        if (unlikely(num == len)) {
                /* We wrapped.  Look through start of hash table. */
                i = 0;
                hoff = hash_off(tdb, 0);
                len = (1ULL << tdb->header.v.hash_bits);
                num = tdb_find_zero_off(tdb, hoff, len);
                if (num == len) {
                        tdb->ecode = TDB_ERR_CORRUPT;
                        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                                 "hash_add: full hash table!\n");
                        return -1;
                }
        }
        if (tdb_read_off(tdb, hash_off(tdb, i + num)) != 0) {
                tdb->ecode = TDB_ERR_CORRUPT;
                tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                         "hash_add: overwriting hash table?\n");
                return -1;
        }

        /* FIXME: Encode extra hash bits! */
        return tdb_write_off(tdb, hash_off(tdb, i + num), off);
}
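
/* Worked example (comment added): with hash_bits = 4 (16 buckets), a
 * record whose hash masks to bucket 13 probes 13, 14, 15; if all are in
 * use, the search restarts from bucket 0.  This is plain linear probing,
 * so a record can land far from its ideal bucket; tdb_delete() below
 * rehashes the following cluster to keep lookups correct. */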

/* If we fail, others will try after us. */
static void enlarge_hash(struct tdb_context *tdb)
{
        tdb_off_t newoff, oldoff, i;
        tdb_len_t hlen;
        uint64_t num = 1ULL << tdb->header.v.hash_bits;
        struct tdb_used_record pad, *r;
        unsigned int records = 0;

        /* FIXME: We should do this without holding locks throughout. */
        if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
                return;

        /* Someone else enlarged for us?  Nothing to do. */
        if ((1ULL << tdb->header.v.hash_bits) != num)
                goto unlock;

again:
        /* Allocate our new array. */
        hlen = num * sizeof(tdb_off_t) * 2;
        newoff = alloc(tdb, 0, hlen, 0, false);
        if (unlikely(newoff == TDB_OFF_ERR))
                goto unlock;
        if (unlikely(newoff == 0)) {
                if (tdb_expand(tdb, 0, hlen, false) == -1)
                        goto unlock;
                goto again;
        }
        /* Step over record header! */
        newoff += sizeof(struct tdb_used_record);

        /* Starts all zero. */
        if (zero_out(tdb, newoff, hlen) == -1)
                goto unlock;

        /* Update header now so we can use normal routines. */
        oldoff = tdb->header.v.hash_off;

        tdb->header.v.hash_bits++;
        tdb->header.v.hash_off = newoff;

        /* FIXME: If the space before is empty, we know this is in its ideal
         * location.  Or steal a bit from the pointer to avoid rehash. */
        for (i = 0; i < num; i++) {
                tdb_off_t off;
                off = tdb_read_off(tdb, oldoff + i * sizeof(tdb_off_t));
                if (unlikely(off == TDB_OFF_ERR))
                        goto oldheader;
                if (off && hash_add(tdb, hash_record(tdb, off), off) == -1)
                        goto oldheader;
                if (off)
                        records++;
        }

        tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
                 "enlarge_hash: moved %u records from %llu buckets.\n",
                 records, (long long)num);

        /* Free up old hash. */
        r = tdb_get(tdb, oldoff - sizeof(*r), &pad, sizeof(*r));
        if (!r)
                goto oldheader;
        add_free_record(tdb, oldoff - sizeof(*r),
                        sizeof(*r)+rec_data_length(r)+rec_extra_padding(r));

        /* Now we write the modified header. */
        write_header(tdb);
unlock:
        tdb_allrecord_unlock(tdb, F_WRLCK);
        return;

oldheader:
        tdb->header.v.hash_bits--;
        tdb->header.v.hash_off = oldoff;
        goto unlock;
}


/* This is the slow version of the routine which searches the
 * hashtable for an entry.
 * We lock every hash bucket up to and including the next zero one.
 */
static tdb_off_t find_and_lock_slow(struct tdb_context *tdb,
                                    struct tdb_data key,
                                    uint64_t h,
                                    int ltype,
                                    tdb_off_t *start_lock,
                                    tdb_len_t *num_locks,
                                    tdb_off_t *bucket,
                                    struct tdb_used_record *rec)
{
        /* Warning: this may drop the lock on *bucket! */
        *num_locks = relock_hash_to_zero(tdb, *start_lock, ltype);
        if (*num_locks == TDB_OFF_ERR)
                return TDB_OFF_ERR;

        for (*bucket = *start_lock;
             *bucket < *start_lock + *num_locks;
             (*bucket)++) {
                tdb_off_t off = entry_matches(tdb, *bucket, h, &key, rec);
                /* Empty entry or we found it? */
                if (off == 0 || off != TDB_OFF_ERR)
                        return off;
        }

        /* We didn't find a zero entry?  Something went badly wrong... */
        unlock_lists(tdb, *start_lock, *num_locks, ltype);
        tdb->ecode = TDB_ERR_CORRUPT;
        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                 "find_and_lock: expected to find an empty hash bucket!\n");
        return TDB_OFF_ERR;
}

/* This is the core routine which searches the hashtable for an entry.
 * On error, no locks are held and TDB_OFF_ERR is returned.
 * Otherwise, *num_locks locks of type ltype from *start_lock are held.
 * The bucket where the entry is (or would be) is in *bucket.
 * If not found, the return value is 0.
 * If found, the return value is the offset, and *rec is the record. */
static tdb_off_t find_and_lock(struct tdb_context *tdb,
                               struct tdb_data key,
                               uint64_t h,
                               int ltype,
                               tdb_off_t *start_lock,
                               tdb_len_t *num_locks,
                               tdb_off_t *bucket,
                               struct tdb_used_record *rec)
{
        tdb_off_t off;

        /* FIXME: can we avoid locks for some fast paths? */
        *start_lock = tdb_lock_list(tdb, h, ltype, TDB_LOCK_WAIT);
        if (*start_lock == TDB_OFF_ERR)
                return TDB_OFF_ERR;

        /* Fast path. */
        off = entry_matches(tdb, *start_lock, h, &key, rec);
        if (likely(off != TDB_OFF_ERR)) {
                *bucket = *start_lock;
                *num_locks = 1;
                return off;
        }

        /* Slow path, need to grab more locks and search. */
        return find_and_lock_slow(tdb, key, h, ltype, start_lock, num_locks,
                                  bucket, rec);
}

/* Returns -1 on error, 0 on OK, 1 on "expand and retry." */
static int replace_data(struct tdb_context *tdb,
                        uint64_t h, struct tdb_data key, struct tdb_data dbuf,
                        tdb_off_t bucket,
                        tdb_off_t old_off, tdb_len_t old_room,
                        bool growing)
{
        tdb_off_t new_off;

        /* Allocate a new record. */
        new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
        if (unlikely(new_off == TDB_OFF_ERR))
                return -1;

        if (unlikely(new_off == 0))
                return 1;

        /* We didn't like the existing one: remove it. */
        if (old_off)
                add_free_record(tdb, old_off,
                                sizeof(struct tdb_used_record)
                                + key.dsize + old_room);

        /* FIXME: Encode extra hash bits! */
        if (tdb_write_off(tdb, hash_off(tdb, bucket), new_off) == -1)
                return -1;

        new_off += sizeof(struct tdb_used_record);
        if (tdb->methods->write(tdb, new_off, key.dptr, key.dsize) == -1)
                return -1;

        new_off += key.dsize;
        if (tdb->methods->write(tdb, new_off, dbuf.dptr, dbuf.dsize) == -1)
                return -1;

        /* FIXME: tdb_increment_seqnum(tdb); */
        return 0;
}

int tdb_store(struct tdb_context *tdb,
              struct tdb_data key, struct tdb_data dbuf, int flag)
{
        tdb_off_t off, bucket, start, num;
        tdb_len_t old_room = 0;
        struct tdb_used_record rec;
        uint64_t h;
        int ret;

        h = tdb_hash(tdb, key.dptr, key.dsize);
        off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
        if (unlikely(off == TDB_OFF_ERR))
                return -1;

        /* Now we have lock on this hash bucket. */
        if (flag == TDB_INSERT) {
                if (off) {
                        tdb->ecode = TDB_ERR_EXISTS;
                        goto fail;
                }
        } else {
                if (off) {
                        old_room = rec_data_length(&rec)
                                + rec_extra_padding(&rec);
                        if (old_room >= dbuf.dsize) {
                                /* Can modify in-place.  Easy! */
                                if (update_rec_hdr(tdb, off,
                                                   key.dsize, dbuf.dsize,
                                                   &rec, h))
                                        goto fail;
                                if (tdb->methods->write(tdb, off + sizeof(rec)
                                                        + key.dsize,
                                                        dbuf.dptr, dbuf.dsize))
                                        goto fail;
                                unlock_lists(tdb, start, num, F_WRLCK);
                                return 0;
                        }
                        /* FIXME: See if right record is free? */
                } else {
                        if (flag == TDB_MODIFY) {
                                /* if the record doesn't exist and we
                                   are in TDB_MODIFY mode then we should fail
                                   the store */
                                tdb->ecode = TDB_ERR_NOEXIST;
                                goto fail;
                        }
                }
        }

        /* If we didn't use the old record, this implies we're growing. */
        ret = replace_data(tdb, h, key, dbuf, bucket, off, old_room, off != 0);
        unlock_lists(tdb, start, num, F_WRLCK);

        if (unlikely(ret == 1)) {
                /* Expand, then try again... */
                if (tdb_expand(tdb, key.dsize, dbuf.dsize, off != 0) == -1)
                        return -1;
                return tdb_store(tdb, key, dbuf, flag);
        }

        /* FIXME: by simple simulation, this approximated 60% full.
         * Check in real case! */
        if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
                enlarge_hash(tdb);

        return ret;

fail:
        unlock_lists(tdb, start, num, F_WRLCK);
        return -1;
}
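
/* Illustrative usage sketch (not part of the original file): storing a
 * C string under a string key.  TDB_INSERT fails with TDB_ERR_EXISTS if
 * the key is present, TDB_MODIFY requires it to exist, and any other
 * flag value takes the replace path above.  Names are hypothetical. */
#if 0
static int example_store(struct tdb_context *tdb,
                         const char *k, const char *v)
{
        struct tdb_data key, dbuf;

        key.dptr = (unsigned char *)k;
        key.dsize = strlen(k);
        dbuf.dptr = (unsigned char *)v;
        dbuf.dsize = strlen(v) + 1;     /* keep the NUL terminator */

        return tdb_store(tdb, key, dbuf, TDB_INSERT);
}
#endif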

int tdb_append(struct tdb_context *tdb,
               struct tdb_data key, struct tdb_data dbuf)
{
        tdb_off_t off, bucket, start, num;
        struct tdb_used_record rec;
        tdb_len_t old_room = 0, old_dlen;
        uint64_t h;
        unsigned char *newdata;
        struct tdb_data new_dbuf;
        int ret;

        h = tdb_hash(tdb, key.dptr, key.dsize);
        off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
        if (unlikely(off == TDB_OFF_ERR))
                return -1;

        if (off) {
                old_dlen = rec_data_length(&rec);
                old_room = old_dlen + rec_extra_padding(&rec);

                /* Fast path: can append in place. */
                if (rec_extra_padding(&rec) >= dbuf.dsize) {
                        if (update_rec_hdr(tdb, off, key.dsize,
                                           old_dlen + dbuf.dsize, &rec, h))
                                goto fail;

                        off += sizeof(rec) + key.dsize + old_dlen;
                        if (tdb->methods->write(tdb, off, dbuf.dptr,
                                                dbuf.dsize) == -1)
                                goto fail;

                        /* FIXME: tdb_increment_seqnum(tdb); */
                        unlock_lists(tdb, start, num, F_WRLCK);
                        return 0;
                }
                /* FIXME: Check right record free? */

                /* Slow path. */
                newdata = malloc(key.dsize + old_dlen + dbuf.dsize);
                if (!newdata) {
                        tdb->ecode = TDB_ERR_OOM;
                        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                                 "tdb_append: cannot allocate %llu bytes!\n",
                                 (long long)key.dsize + old_dlen + dbuf.dsize);
                        goto fail;
                }
                if (tdb->methods->read(tdb, off + sizeof(rec) + key.dsize,
                                       newdata, old_dlen) != 0) {
                        free(newdata);
                        goto fail;
                }
                memcpy(newdata + old_dlen, dbuf.dptr, dbuf.dsize);
                new_dbuf.dptr = newdata;
                new_dbuf.dsize = old_dlen + dbuf.dsize;
        } else {
                newdata = NULL;
                new_dbuf = dbuf;
        }

        /* If they're using tdb_append(), it implies they're growing the
         * record. */
        ret = replace_data(tdb, h, key, new_dbuf, bucket, off, old_room, true);
        unlock_lists(tdb, start, num, F_WRLCK);
        free(newdata);

        if (unlikely(ret == 1)) {
                /* Expand, then try again. */
                if (tdb_expand(tdb, key.dsize, dbuf.dsize, true) == -1)
                        return -1;
                return tdb_append(tdb, key, dbuf);
        }

        /* FIXME: by simple simulation, this approximated 60% full.
         * Check in real case! */
        if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
                enlarge_hash(tdb);

        return ret;

fail:
        unlock_lists(tdb, start, num, F_WRLCK);
        return -1;
}

struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
{
        tdb_off_t off, start, num, bucket;
        struct tdb_used_record rec;
        uint64_t h;
        struct tdb_data ret;

        h = tdb_hash(tdb, key.dptr, key.dsize);
        off = find_and_lock(tdb, key, h, F_RDLCK, &start, &num, &bucket, &rec);
        if (unlikely(off == TDB_OFF_ERR))
                return tdb_null;

        if (!off) {
                tdb->ecode = TDB_ERR_NOEXIST;
                ret = tdb_null;
        } else {
                ret.dsize = rec_data_length(&rec);
                ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
                                          ret.dsize);
        }

        unlock_lists(tdb, start, num, F_RDLCK);
        return ret;
}
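
/* Illustrative usage sketch (not part of the original file), assuming
 * tdb_alloc_read() hands back a malloc'ed buffer the caller must free,
 * as the tdb1 API did.  Names are hypothetical. */
#if 0
static void example_fetch(struct tdb_context *tdb, const char *k)
{
        struct tdb_data key, val;

        key.dptr = (unsigned char *)k;
        key.dsize = strlen(k);

        val = tdb_fetch(tdb, key);
        if (!val.dptr) {
                /* Not found (or error): check tdb_error(tdb). */
                return;
        }
        /* ... use val.dptr[0 .. val.dsize-1] ... */
        free(val.dptr);
}
#endif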

int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
{
        tdb_off_t i, bucket, off, start, num;
        struct tdb_used_record rec;
        uint64_t h;

        h = tdb_hash(tdb, key.dptr, key.dsize);
        start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
        if (unlikely(start == TDB_OFF_ERR))
                return -1;

        /* FIXME: Fastpath: if next is zero, we can delete without lock,
         * since this lock protects us. */
        off = find_and_lock_slow(tdb, key, h, F_WRLCK,
                                 &start, &num, &bucket, &rec);
        if (unlikely(off == TDB_OFF_ERR))
                return -1;

        if (!off) {
                /* FIXME: We could optimize not found case if it mattered, by
                 * reading offset after first lock: if it's zero, goto here. */
                unlock_lists(tdb, start, num, F_WRLCK);
                tdb->ecode = TDB_ERR_NOEXIST;
                return -1;
        }
        /* Since we found the entry, we must have locked it and a zero. */
        assert(num >= 2);

        /* This actually unlinks it. */
        if (tdb_write_off(tdb, hash_off(tdb, bucket), 0) == -1)
                goto unlock_err;

        /* Rehash anything following. */
        for (i = bucket+1; i != bucket + num - 1; i++) {
                tdb_off_t hoff, off2;
                uint64_t h2;

                hoff = hash_off(tdb, i);
                off2 = tdb_read_off(tdb, hoff);
                if (unlikely(off2 == TDB_OFF_ERR))
                        goto unlock_err;

                /* This can happen if we raced. */
                if (unlikely(off2 == 0))
                        break;

                /* Maybe use a bit to indicate it is in ideal place? */
                h2 = hash_record(tdb, off2);
                /* Is it happy where it is? */
                if (hash_off(tdb, h2) == hoff)
                        continue;

                /* Remove it. */
                if (tdb_write_off(tdb, hoff, 0) == -1)
                        goto unlock_err;

                /* Rehash it. */
                if (hash_add(tdb, h2, off2) == -1)
                        goto unlock_err;
        }

        /* Free the deleted entry. */
        if (add_free_record(tdb, off,
                            sizeof(struct tdb_used_record)
                            + rec_key_length(&rec)
                            + rec_data_length(&rec)
                            + rec_extra_padding(&rec)) != 0)
                goto unlock_err;

        unlock_lists(tdb, start, num, F_WRLCK);
        return 0;

unlock_err:
        unlock_lists(tdb, start, num, F_WRLCK);
        return -1;
}
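
/* Comment added for orientation: the rehash pass above preserves the
 * linear-probing invariant that every record is reachable from its ideal
 * bucket without crossing a zero entry.  For example, if keys A and B
 * both hash to bucket 3 and sit in buckets 3 and 4, deleting A would
 * leave a hole at 3; without re-adding B via hash_add(), a later lookup
 * of B would stop at the now-empty bucket 3 and miss it. */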

int tdb_close(struct tdb_context *tdb)
{
        struct tdb_context **i;
        int ret = 0;

        /* FIXME:
        if (tdb->transaction) {
                tdb_transaction_cancel(tdb);
        }
        */
        tdb_trace(tdb, "tdb_close");

        if (tdb->map_ptr) {
                if (tdb->flags & TDB_INTERNAL)
                        free(tdb->map_ptr);
                else
                        tdb_munmap(tdb);
        }
        free((char *)tdb->name);
        if (tdb->fd != -1) {
                ret = close(tdb->fd);
                tdb->fd = -1;
        }
        free(tdb->lockrecs);

        /* Remove from contexts list */
        for (i = &tdbs; *i; i = &(*i)->next) {
                if (*i == tdb) {
                        *i = tdb->next;
                        break;
                }
        }

#ifdef TDB_TRACE
        close(tdb->tracefd);
#endif
        free(tdb);

        return ret;
}

enum TDB_ERROR tdb_error(struct tdb_context *tdb)
{
        return tdb->ecode;
}