9ee64746a3e2c9220367f7847579d5f831b1719c
[ccan] / ccan / tdb2 / tdb.c
1 #include "private.h"
2 #include <ccan/tdb2/tdb2.h>
3 #include <ccan/hash/hash.h>
4 #include <ccan/build_assert/build_assert.h>
5 #include <ccan/likely/likely.h>
6 #include <assert.h>
7
/* The null return. */
struct tdb_data tdb_null = { .dptr = NULL, .dsize = 0 };

/* all contexts, to ensure no double-opens (fcntl locks don't nest!)
 * Singly-linked list: tdb_open() prepends new contexts, and
 * tdb_already_open() walks it via ->next. */
static struct tdb_context *tdbs = NULL;
13
/* Default logging callback: silently discards all messages.
 * Installed in tdb_open() unless a TDB_ATTRIBUTE_LOG overrides it. */
PRINTF_ATTRIBUTE(4, 5) static void
null_log_fn(struct tdb_context *tdb,
	    enum tdb_debug_level level, void *priv,
	    const char *fmt, ...)
{
}
20
/* We do a lot of work assuming our copy of the header volatile area
 * is uptodate, and usually it is.  However, once we grab a lock, we have to
 * re-check it.
 *
 * Re-reads the volatile header area ("v") from disk if the on-disk
 * generation count differs from our cached copy.  Returns true if the
 * cache was stale and has been refreshed, false otherwise.  Must be
 * called holding at least one lock (or with TDB_NOLOCK set), else we
 * could observe a partial update. */
bool header_changed(struct tdb_context *tdb)
{
	uint64_t gen;

	/* Being called with the header already marked uptodate (while
	 * locking is in effect) suggests a redundant check: warn. */
	if (!(tdb->flags & TDB_NOLOCK) && tdb->header_uptodate) {
		tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
			 "warning: header uptodate already\n");
	}

	/* We could get a partial update if we're not holding any locks. */
	assert((tdb->flags & TDB_NOLOCK) || tdb_has_locks(tdb));

	tdb->header_uptodate = true;
	gen = tdb_read_off(tdb, offsetof(struct tdb_header, v.generation));
	if (unlikely(gen != tdb->header.v.generation)) {
		/* Stale: re-read the whole volatile area, endian-converted. */
		tdb_read_convert(tdb, offsetof(struct tdb_header, v),
				 &tdb->header.v, sizeof(tdb->header.v));
		return true;
	}
	return false;
}
45
/* Bump the generation count and write our cached volatile header area
 * back to disk (endian-converted).  Returns tdb_write_convert()'s
 * result: 0 on success, -1 on error.  Asserts that nobody changed the
 * on-disk generation behind our back. */
int write_header(struct tdb_context *tdb)
{
	/* If this fires, the header changed while we thought we held it. */
	assert(tdb_read_off(tdb, offsetof(struct tdb_header, v.generation))
	       == tdb->header.v.generation);
	tdb->header.v.generation++;
	return tdb_write_convert(tdb, offsetof(struct tdb_header, v),
				 &tdb->header.v, sizeof(tdb->header.v));
}
54
55 static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
56                              void *arg)
57 {
58         return hash64_stable((const unsigned char *)key, length, seed);
59 }
60
/* Hash a key with this database's hash function, per-file seed and
 * private argument. */
uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
{
	return tdb->khash(ptr, len, tdb->header.hash_seed, tdb->hash_priv);
}
65
66 static bool tdb_already_open(dev_t device, ino_t ino)
67 {
68         struct tdb_context *i;
69         
70         for (i = tdbs; i; i = i->next) {
71                 if (i->device == device && i->inode == ino) {
72                         return true;
73                 }
74         }
75
76         return false;
77 }
78
/* Produce a 64-bit hash seed for a new database.  Tries /dev/urandom,
 * then an EGD pool socket, and finally falls back to pid+time.  Always
 * returns some value; entropy quality degrades with each fallback. */
static uint64_t random_number(struct tdb_context *tdb)
{
	int fd;
	uint64_t ret = 0;
	struct timeval now;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		if (tdb_read_all(fd, &ret, sizeof(ret))) {
			tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
				 "tdb_open: random from /dev/urandom\n");
			close(fd);
			return ret;
		}
		close(fd);
	}
	/* FIXME: Untested!  Based on Wikipedia protocol description! */
	fd = open("/dev/egd-pool", O_RDWR);
	if (fd >= 0) {
		/* Command is 1, next byte is size we want to read. */
		char cmd[2] = { 1, sizeof(uint64_t) };
		if (write(fd, cmd, sizeof(cmd)) == sizeof(cmd)) {
			char reply[1 + sizeof(uint64_t)];
			int r = read(fd, reply, sizeof(reply));
			/* reply[0] is the byte count, data follows it. */
			if (r > 1) {
				tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
					 "tdb_open: %u random bytes from"
					 " /dev/egd-pool\n", r-1);
				/* Copy at least some bytes. */
				memcpy(&ret, reply+1, r - 1);
				if (reply[0] == sizeof(uint64_t)
				    && r == sizeof(reply)) {
					close(fd);
					return ret;
				}
				/* NOTE(review): on a short read we fall
				 * through to the pid/time fallback, which
				 * overwrites the bytes copied above. */
			}
		}
		close(fd);
	}

	/* Fallback: pid and time. */
	gettimeofday(&now, NULL);
	ret = getpid() * 100132289ULL + now.tv_sec * 1000000ULL + now.tv_usec;
	tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
		 "tdb_open: random from getpid and time\n");
	return ret;
}
126
/* On-disk layout of the front of a freshly created database: file
 * header, first zone header with its free-list buckets, then the
 * initial hash table (preceded by its used-record header), then the
 * free record covering the remainder of the zone. */
struct new_db_head {
	struct tdb_header hdr;
	struct free_zone_header zhdr;
	/* One free-list head per bucket, plus one. */
	tdb_off_t free[BUCKETS_FOR_ZONE(INITIAL_ZONE_BITS) + 1];
	struct tdb_used_record hrec;
	tdb_off_t hash[1ULL << INITIAL_HASH_BITS];
	struct tdb_free_record frec;
};
135
/* Complete initial file image: the head above, padding out to the full
 * first zone, then the tailer byte. */
struct new_database {
	struct new_db_head h;
	/* Rest up to 1 << INITIAL_ZONE_BITS is empty. */
	char space[(1 << INITIAL_ZONE_BITS)
		   - (sizeof(struct new_db_head) - sizeof(struct tdb_header))];
	uint8_t tailer;
	/* Don't count final padding! */
};
144
/* initialise a new database
 *
 * Builds the complete initial file image in memory (struct new_database),
 * endian-converted as if read from disk, then either keeps it in memory
 * (TDB_INTERNAL) or truncates the file and writes it out.  Returns 0 on
 * success, -1 on failure (tdb->ecode set to TDB_ERR_OOM / TDB_ERR_IO
 * where applicable). */
static int tdb_new_database(struct tdb_context *tdb)
{
	/* We make it up in memory, then write it out if not internal */
	struct new_database newdb;
	unsigned int bucket, magic_off, dbsize;

	/* Don't want any extra padding! */
	dbsize = offsetof(struct new_database, tailer) + sizeof(newdb.tailer);

	/* Fill in the header */
	newdb.h.hdr.version = TDB_VERSION;
	newdb.h.hdr.hash_seed = random_number(tdb);
	newdb.h.hdr.hash_test = TDB_HASH_MAGIC;
	/* hash_test lets a later opener detect a mismatched hash fn. */
	newdb.h.hdr.hash_test = tdb->khash(&newdb.h.hdr.hash_test,
					   sizeof(newdb.h.hdr.hash_test),
					   newdb.h.hdr.hash_seed,
					   tdb->hash_priv);
	memset(newdb.h.hdr.reserved, 0, sizeof(newdb.h.hdr.reserved));
	newdb.h.hdr.v.generation = 0;
	/* Initial hashes are empty. */
	newdb.h.hdr.v.hash_bits = INITIAL_HASH_BITS;
	newdb.h.hdr.v.hash_off = offsetof(struct new_database, h.hash);
	set_header(tdb, &newdb.h.hrec, 0,
		   sizeof(newdb.h.hash), sizeof(newdb.h.hash), 0,
		   INITIAL_ZONE_BITS);
	memset(newdb.h.hash, 0, sizeof(newdb.h.hash));

	/* Create the single free entry. */
	newdb.h.frec.magic_and_meta = TDB_FREE_MAGIC | INITIAL_ZONE_BITS;
	newdb.h.frec.data_len = (sizeof(newdb.h.frec)
				 - sizeof(struct tdb_used_record)
				 + sizeof(newdb.space));

	/* Free is mostly empty... */
	newdb.h.zhdr.zone_bits = INITIAL_ZONE_BITS;
	memset(newdb.h.free, 0, sizeof(newdb.h.free));

	/* ... except for this one bucket. */
	bucket = size_to_bucket(INITIAL_ZONE_BITS, newdb.h.frec.data_len);
	newdb.h.free[bucket] = offsetof(struct new_database, h.frec);
	newdb.h.frec.next = newdb.h.frec.prev = 0;

	/* Tailer contains maximum number of free_zone bits. */
	newdb.tailer = INITIAL_ZONE_BITS;

	/* Magic food */
	memset(newdb.h.hdr.magic_food, 0, sizeof(newdb.h.hdr.magic_food));
	strcpy(newdb.h.hdr.magic_food, TDB_MAGIC_FOOD);

	/* This creates an endian-converted database, as if read from disk.
	 * Everything from magic_food up to (not including) the tailer. */
	magic_off = offsetof(struct tdb_header, magic_food);
	tdb_convert(tdb,
		    (char *)&newdb.h.hdr + magic_off,
		    dbsize - 1 - magic_off);

	tdb->header = newdb.h.hdr;

	if (tdb->flags & TDB_INTERNAL) {
		/* Internal databases live purely in memory. */
		tdb->map_size = dbsize;
		tdb->map_ptr = malloc(tdb->map_size);
		if (!tdb->map_ptr) {
			tdb->ecode = TDB_ERR_OOM;
			return -1;
		}
		memcpy(tdb->map_ptr, &newdb, tdb->map_size);
		return 0;
	}
	if (lseek(tdb->fd, 0, SEEK_SET) == -1)
		return -1;

	if (ftruncate(tdb->fd, 0) == -1)
		return -1;

	if (!tdb_pwrite_all(tdb->fd, &newdb, dbsize, 0)) {
		tdb->ecode = TDB_ERR_IO;
		return -1;
	}
	return 0;
}
225
/* Open (or, with O_CREAT, create) a tdb database.
 *
 * Attributes may override the default logging and hash functions.
 * Returns the new context on success; on failure returns NULL with
 * errno set, logging the reason where a log function is available.
 * TDB_INTERNAL databases are built purely in memory and skip all the
 * file handling below. */
struct tdb_context *tdb_open(const char *name, int tdb_flags,
			     int open_flags, mode_t mode,
			     union tdb_attribute *attr)
{
	struct tdb_context *tdb;
	struct stat st;
	int save_errno;
	uint64_t hash_test;
	unsigned v;

	tdb = malloc(sizeof(*tdb));
	if (!tdb) {
		/* Can't log this */
		errno = ENOMEM;
		goto fail;
	}
	/* Defaults; the attribute loop below may override log and hash. */
	tdb->name = NULL;
	tdb->map_ptr = NULL;
	tdb->fd = -1;
	tdb->map_size = sizeof(struct tdb_header);
	tdb->ecode = TDB_SUCCESS;
	/* header will be read in below. */
	tdb->header_uptodate = false;
	tdb->flags = tdb_flags;
	tdb->log = null_log_fn;
	tdb->log_priv = NULL;
	tdb->khash = jenkins_hash;
	tdb->hash_priv = NULL;
	tdb->transaction = NULL;
	/* last_zone will be set below. */
	tdb_io_init(tdb);
	tdb_lock_init(tdb);

	/* Walk the attribute chain. */
	while (attr) {
		switch (attr->base.attr) {
		case TDB_ATTRIBUTE_LOG:
			tdb->log = attr->log.log_fn;
			tdb->log_priv = attr->log.log_private;
			break;
		case TDB_ATTRIBUTE_HASH:
			tdb->khash = attr->hash.hash_fn;
			tdb->hash_priv = attr->hash.hash_private;
			break;
		default:
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: unknown attribute type %u\n",
				 attr->base.attr);
			errno = EINVAL;
			goto fail;
		}
		attr = attr->base.next;
	}

	/* We always need to read the header, so write-only is senseless. */
	if ((open_flags & O_ACCMODE) == O_WRONLY) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: can't open tdb %s write-only\n", name);
		errno = EINVAL;
		goto fail;
	}

	if ((open_flags & O_ACCMODE) == O_RDONLY) {
		tdb->read_only = true;
		/* read only databases don't do locking */
		tdb->flags |= TDB_NOLOCK;
	} else
		tdb->read_only = false;

	/* internal databases don't need any of the rest. */
	if (tdb->flags & TDB_INTERNAL) {
		tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
		if (tdb_new_database(tdb) != 0) {
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: tdb_new_database failed!");
			goto fail;
		}
		TEST_IT(tdb->flags & TDB_CONVERT);
		/* tdb_new_database() left the cached header in on-disk
		 * byte order; convert it back to native. */
		tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
		tdb_zone_init(tdb);
		return tdb;
	}

	if ((tdb->fd = open(name, open_flags, mode)) == -1) {
		tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
			 "tdb_open: could not open file %s: %s\n",
			 name, strerror(errno));
		goto fail;	/* errno set by open(2) */
	}

	/* on exec, don't inherit the fd */
	v = fcntl(tdb->fd, F_GETFD, 0);
	fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC);

	/* ensure there is only one process initialising at once */
	if (tdb_lock_open(tdb) == -1) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: failed to get open lock on %s: %s\n",
			 name, strerror(errno));
		goto fail;	/* errno set by tdb_brlock */
	}

	/* Unreadable header or bad magic: create afresh if allowed. */
	if (!tdb_pread_all(tdb->fd, &tdb->header, sizeof(tdb->header), 0)
	    || strcmp(tdb->header.magic_food, TDB_MAGIC_FOOD) != 0) {
		if (!(open_flags & O_CREAT) || tdb_new_database(tdb) == -1) {
			if (errno == 0) {
				errno = EIO; /* ie bad format or something */
			}
			goto fail;
		}
	} else if (tdb->header.version != TDB_VERSION) {
		/* A byteswapped version means the file was written on a
		 * machine of the other endian: convert on the fly. */
		if (tdb->header.version == bswap_64(TDB_VERSION))
			tdb->flags |= TDB_CONVERT;
		else {
			/* wrong version */
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: %s is unknown version 0x%llx\n",
				 name, (long long)tdb->header.version);
			errno = EIO;
			goto fail;
		}
	}

	tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
	/* Verify our hash function matches the one used at creation. */
	hash_test = TDB_HASH_MAGIC;
	hash_test = tdb->khash(&hash_test, sizeof(hash_test),
			       tdb->header.hash_seed, tdb->hash_priv);
	if (tdb->header.hash_test != hash_test) {
		/* wrong hash variant */
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: %s uses a different hash function\n",
			 name);
		errno = EIO;
		goto fail;
	}

	if (fstat(tdb->fd, &st) == -1)
		goto fail;

	/* Is it already in the open list?  If so, fail. */
	if (tdb_already_open(st.st_dev, st.st_ino)) {
		/* FIXME */
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: %s (%d,%d) is already open in this process\n",
			 name, (int)st.st_dev, (int)st.st_ino);
		errno = EBUSY;
		goto fail;
	}

	tdb->name = strdup(name);
	if (!tdb->name) {
		errno = ENOMEM;
		goto fail;
	}

	tdb->device = st.st_dev;
	tdb->inode = st.st_ino;
	tdb_unlock_open(tdb);

	/* This make sure we have current map_size and mmap. */
	tdb->methods->oob(tdb, tdb->map_size + 1, true);

	/* Now we can pick a random free zone to start from. */
	if (tdb_zone_init(tdb) == -1)
		goto fail;

	/* Link into the list of open databases (see tdbs above). */
	tdb->next = tdbs;
	tdbs = tdb;
	return tdb;

 fail:
	/* Preserve errno across the cleanup calls below. */
	save_errno = errno;

	if (!tdb)
		return NULL;

#ifdef TDB_TRACE
	close(tdb->tracefd);
#endif
	if (tdb->map_ptr) {
		if (tdb->flags & TDB_INTERNAL) {
			free(tdb->map_ptr);
		} else
			tdb_munmap(tdb);
	}
	free((char *)tdb->name);
	if (tdb->fd != -1)
		if (close(tdb->fd) != 0)
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: failed to close tdb->fd"
				 " on error!\n");
	free(tdb);
	errno = save_errno;
	return NULL;
}
419
420 tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
421 {
422         return tdb->header.v.hash_off
423                 + ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
424                    * sizeof(tdb_off_t));
425 }
426
/* Returns 0 if the entry is a zero (definitely not a match).
 * Returns a valid entry offset if it's a match.  Fills in rec.
 * Otherwise returns TDB_OFF_ERR: keep searching. */
static tdb_off_t entry_matches(struct tdb_context *tdb,
			       uint64_t list,
			       uint64_t hash,
			       const struct tdb_data *key,
			       struct tdb_used_record *rec)
{
	tdb_off_t off;
	uint64_t keylen;
	const unsigned char *rkey;

	/* Wrap the bucket number into the current table size. */
	list &= ((1ULL << tdb->header.v.hash_bits) - 1);

	off = tdb_read_off(tdb, tdb->header.v.hash_off
			   + list * sizeof(tdb_off_t));
	if (off == 0 || off == TDB_OFF_ERR)
		return off;

#if 0 /* FIXME: Check other bits. */
	unsigned int bits, bitmask, hoffextra;
	/* Bottom three bits show how many extra hash bits. */
	bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1;
	bitmask = (1 << bits)-1;
	hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask);
	uint64_t hextra = hash >> tdb->header.v.hash_bits;
	if ((hextra & bitmask) != hoffextra) 
		return TDB_OFF_ERR;
	off &= ~...;
#endif

	if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
		return TDB_OFF_ERR;

	/* FIXME: check extra bits in header! */
	/* Cheap length check before comparing key bytes. */
	keylen = rec_key_length(rec);
	if (keylen != key->dsize)
		return TDB_OFF_ERR;

	rkey = tdb_access_read(tdb, off + sizeof(*rec), keylen, false);
	if (!rkey)
		return TDB_OFF_ERR;
	if (memcmp(rkey, key->dptr, keylen) != 0)
		off = TDB_OFF_ERR;
	tdb_access_release(tdb, rkey);
	return off;
}
475
476 /* FIXME: Optimize? */
477 static void unlock_lists(struct tdb_context *tdb,
478                          tdb_off_t list, tdb_len_t num,
479                          int ltype)
480 {
481         tdb_off_t i;
482
483         for (i = list; i < list + num; i++)
484                 tdb_unlock_list(tdb, i, ltype);
485 }
486
487 /* FIXME: Optimize? */
488 static int lock_lists(struct tdb_context *tdb,
489                       tdb_off_t list, tdb_len_t num,
490                       int ltype)
491 {
492         tdb_off_t i;
493
494         for (i = list; i < list + num; i++) {
495                 if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT)
496                     == TDB_OFF_ERR) {
497                         unlock_lists(tdb, list, i - list, ltype);
498                         return -1;
499                 }
500         }
501         return 0;
502 }
503
/* We lock hashes up to the next empty offset.  We already hold the
 * lock on the start bucket, but we may need to release and re-grab
 * it.  If we fail, we hold no locks at all!
 *
 * Returns the number of buckets locked from "start" (including the
 * terminating zero bucket), or TDB_OFF_ERR on failure.  If the run of
 * non-zero buckets wraps past the end of the table, the locked range
 * additionally covers buckets 0 up to (and including) the first zero
 * bucket there, and the returned count includes those. */
static tdb_len_t relock_hash_to_zero(struct tdb_context *tdb,
				     tdb_off_t start, int ltype)
{
	tdb_len_t num, len;

again:
	num = 1ULL << tdb->header.v.hash_bits;
	len = tdb_find_zero_off(tdb, hash_off(tdb, start), num - start);
	if (unlikely(len == num - start)) {
		/* We hit the end of the hash range.  Drop lock: we have
		   to lock start of hash first. */
		tdb_len_t pre_locks;

		tdb_unlock_list(tdb, start, ltype);

		/* Grab something, so header is stable. */
		if (tdb_lock_list(tdb, 0, ltype, TDB_LOCK_WAIT))
			return TDB_OFF_ERR;
		pre_locks = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
		/* We want to lock the zero entry as well. */
		pre_locks++;
		/* Bucket 0 is already held, so lock the rest of the run. */
		if (lock_lists(tdb, 1, pre_locks - 1, ltype) == -1) {
			tdb_unlock_list(tdb, 0, ltype);
			return TDB_OFF_ERR;
		}

		/* Now lock later ones. */
		if (unlikely(lock_lists(tdb, start, len, ltype) == -1)) {
			unlock_lists(tdb, 0, pre_locks, ltype);
			return TDB_OFF_ERR;
		}
		len += pre_locks;
	} else {
		/* We want to lock the zero entry as well. */
		len++;
		/* But we already have lock on start. */
		if (unlikely(lock_lists(tdb, start+1, len-1, ltype) == -1)) {
			tdb_unlock_list(tdb, start, ltype);
			return TDB_OFF_ERR;
		}
	}

	/* Now, did we lose the race, and it's not zero any more? */
	if (unlikely(tdb_read_off(tdb, hash_off(tdb, start + len - 1)) != 0)) {
		/* Leave the start locked, as expected. */
		unlock_lists(tdb, start + 1, len - 1, ltype);
		goto again;
	}

	return len;
}
558
/* FIXME: modify, don't rewrite! */
/* Rewrite an existing record's header for a new data length, keeping
 * the total room (key + old data + old padding) and zone bits, then
 * write it back to disk.  Returns 0 on success, -1 on failure. */
static int update_rec_hdr(struct tdb_context *tdb,
			  tdb_off_t off,
			  tdb_len_t keylen,
			  tdb_len_t datalen,
			  struct tdb_used_record *rec,
			  uint64_t h)
{
	/* Room available for data: the old data plus its padding. */
	uint64_t dataroom = rec_data_length(rec) + rec_extra_padding(rec);

	if (set_header(tdb, rec, keylen, datalen, keylen + dataroom, h,
		       rec_zone_bits(rec)))
		return -1;

	return tdb_write_convert(tdb, off, rec, sizeof(*rec));
}
575
/* Store record offset "off" in the first empty bucket at or after the
 * hash's ideal bucket, wrapping once to the table start.  Returns 0 on
 * success, -1 on error (TDB_ERR_CORRUPT if the table is full or the
 * chosen bucket is unexpectedly occupied).  Caller must hold locks. */
static int hash_add(struct tdb_context *tdb,
		    uint64_t hash, tdb_off_t off)
{
	tdb_off_t i, hoff, len, num;

	/* Look for next space. */
	i = (hash & ((1ULL << tdb->header.v.hash_bits) - 1));
	len = (1ULL << tdb->header.v.hash_bits) - i;
	num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);

	if (unlikely(num == len)) {
		/* We wrapped.  Look through start of hash table. */
		i = 0;
		hoff = hash_off(tdb, 0);
		len = (1ULL << tdb->header.v.hash_bits);
		num = tdb_find_zero_off(tdb, hoff, len);
		if (num == len) {
			tdb->ecode = TDB_ERR_CORRUPT;
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "hash_add: full hash table!\n");
			return -1;
		}
	}
	/* Sanity check: the bucket we are about to write must be empty. */
	if (tdb_read_off(tdb, hash_off(tdb, i + num)) != 0) {
		tdb->ecode = TDB_ERR_CORRUPT;
		tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
			 "hash_add: overwriting hash table?\n");
		return -1;
	}

	/* FIXME: Encode extra hash bits! */
	return tdb_write_off(tdb, hash_off(tdb, i + num), off);
}
609
/* If we fail, others will try after us. */
/* Double the hash table under the allrecord lock: allocate a new,
 * twice-as-large array, rehash every entry into it, free the old
 * array, and persist the new header.  On failure after the in-memory
 * header was updated, the "oldheader" path restores hash_bits/hash_off
 * so the cached header stays consistent with disk. */
static void enlarge_hash(struct tdb_context *tdb)
{
	tdb_off_t newoff, oldoff, i;
	tdb_len_t hlen;
	uint64_t num = 1ULL << tdb->header.v.hash_bits;
	struct tdb_used_record pad, *r;
	unsigned int records = 0;

	/* FIXME: We should do this without holding locks throughout. */
	if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
		return;

	/* Someone else enlarged for us?  Nothing to do. */
	if ((1ULL << tdb->header.v.hash_bits) != num)
		goto unlock;

again:
	/* Allocate our new array. */
	hlen = num * sizeof(tdb_off_t) * 2;
	newoff = alloc(tdb, 0, hlen, 0, false);
	if (unlikely(newoff == TDB_OFF_ERR))
		goto unlock;
	/* alloc() returning 0 means no room: expand the file and retry. */
	if (unlikely(newoff == 0)) {
		if (tdb_expand(tdb, 0, hlen, false) == -1)
			goto unlock;
		goto again;
	}
	/* Step over record header! */
	newoff += sizeof(struct tdb_used_record);

	/* Starts all zero. */
	if (zero_out(tdb, newoff, hlen) == -1)
		goto unlock;

	/* Update header now so we can use normal routines. */
	oldoff = tdb->header.v.hash_off;

	tdb->header.v.hash_bits++;
	tdb->header.v.hash_off = newoff;

	/* FIXME: If the space before is empty, we know this is in its ideal
	 * location.  Or steal a bit from the pointer to avoid rehash. */
	for (i = 0; i < num; i++) {
		tdb_off_t off;
		off = tdb_read_off(tdb, oldoff + i * sizeof(tdb_off_t));
		if (unlikely(off == TDB_OFF_ERR))
			goto oldheader;
		if (off && hash_add(tdb, hash_record(tdb, off), off) == -1)
			goto oldheader;
		if (off)
			records++;
	}

	tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
		 "enlarge_hash: moved %u records from %llu buckets.\n",
		 records, (long long)num);

	/* Free up old hash. */
	r = tdb_get(tdb, oldoff - sizeof(*r), &pad, sizeof(*r));
	if (!r)
		goto oldheader;
	add_free_record(tdb, rec_zone_bits(r), oldoff - sizeof(*r),
			sizeof(*r)+rec_data_length(r)+rec_extra_padding(r));

	/* Now we write the modified header. */
	write_header(tdb);
unlock:
	tdb_allrecord_unlock(tdb, F_WRLCK);
	return;

oldheader:
	/* Roll back the in-memory header change. */
	tdb->header.v.hash_bits--;
	tdb->header.v.hash_off = oldoff;
	goto unlock;
}
686
687
688 /* This is the slow version of the routine which searches the
689  * hashtable for an entry.
690  * We lock every hash bucket up to and including the next zero one.
691  */
692 static tdb_off_t find_and_lock_slow(struct tdb_context *tdb,
693                                     struct tdb_data key,
694                                     uint64_t h,
695                                     int ltype,
696                                     tdb_off_t *start_lock,
697                                     tdb_len_t *num_locks,
698                                     tdb_off_t *bucket,
699                                     struct tdb_used_record *rec)
700 {
701         /* Warning: this may drop the lock on *bucket! */
702         *num_locks = relock_hash_to_zero(tdb, *start_lock, ltype);
703         if (*num_locks == TDB_OFF_ERR)
704                 return TDB_OFF_ERR;
705
706         for (*bucket = *start_lock;
707              *bucket < *start_lock + *num_locks;
708              (*bucket)++) {
709                 tdb_off_t off = entry_matches(tdb, *bucket, h, &key, rec);
710                 /* Empty entry or we found it? */
711                 if (off == 0 || off != TDB_OFF_ERR)
712                         return off;
713         }
714
715         /* We didn't find a zero entry?  Something went badly wrong... */
716         unlock_lists(tdb, *start_lock, *start_lock + *num_locks, ltype);
717         tdb->ecode = TDB_ERR_CORRUPT;
718         tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
719                  "find_and_lock: expected to find an empty hash bucket!\n");
720         return TDB_OFF_ERR;
721 }
722
723 /* This is the core routine which searches the hashtable for an entry.
724  * On error, no locks are held and TDB_OFF_ERR is returned.
725  * Otherwise, *num_locks locks of type ltype from *start_lock are held.
726  * The bucket where the entry is (or would be) is in *bucket.
727  * If not found, the return value is 0.
728  * If found, the return value is the offset, and *rec is the record. */
729 static tdb_off_t find_and_lock(struct tdb_context *tdb,
730                                struct tdb_data key,
731                                uint64_t h,
732                                int ltype,
733                                tdb_off_t *start_lock,
734                                tdb_len_t *num_locks,
735                                tdb_off_t *bucket,
736                                struct tdb_used_record *rec)
737 {
738         tdb_off_t off;
739
740         /* FIXME: can we avoid locks for some fast paths? */
741         *start_lock = tdb_lock_list(tdb, h, ltype, TDB_LOCK_WAIT);
742         if (*start_lock == TDB_OFF_ERR)
743                 return TDB_OFF_ERR;
744
745         /* Fast path. */
746         off = entry_matches(tdb, *start_lock, h, &key, rec);
747         if (likely(off != TDB_OFF_ERR)) {
748                 *bucket = *start_lock;
749                 *num_locks = 1;
750                 return off;
751         }
752
753         /* Slow path, need to grab more locks and search. */
754         return find_and_lock_slow(tdb, key, h, ltype, start_lock, num_locks,
755                                   bucket, rec);
756 }
757
/* Returns -1 on error, 0 on OK, 1 on "expand and retry."
 *
 * Allocates a fresh record for key+dbuf, frees the old record (if
 * old_off is non-zero), points the hash bucket at the new record, then
 * writes the key followed by the data.  Caller holds the bucket lock. */
static int replace_data(struct tdb_context *tdb,
			uint64_t h, struct tdb_data key, struct tdb_data dbuf,
			tdb_off_t bucket,
			tdb_off_t old_off, tdb_len_t old_room,
			unsigned old_zone,
			bool growing)
{
	tdb_off_t new_off;

	/* Allocate a new record. */
	new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
	if (unlikely(new_off == TDB_OFF_ERR))
		return -1;

	/* alloc() found no space: caller must expand the file and retry. */
	if (unlikely(new_off == 0))
		return 1;

	/* We didn't like the existing one: remove it. */
	if (old_off)
		add_free_record(tdb, old_zone, old_off,
				sizeof(struct tdb_used_record)
				+ key.dsize + old_room);

	/* FIXME: Encode extra hash bits! */
	if (tdb_write_off(tdb, hash_off(tdb, bucket), new_off) == -1)
		return -1;

	new_off += sizeof(struct tdb_used_record);
	if (tdb->methods->write(tdb, new_off, key.dptr, key.dsize) == -1)
		return -1;

	new_off += key.dsize;
	if (tdb->methods->write(tdb, new_off, dbuf.dptr, dbuf.dsize) == -1)
		return -1;

	/* FIXME: tdb_increment_seqnum(tdb); */
	return 0;
}
797
/* Store a key/data pair.  flag is TDB_INSERT (fail if key exists),
 * TDB_MODIFY (fail if key doesn't exist) or TDB_REPLACE.
 * Returns 0 on success, -1 on error with tdb->ecode set.
 * May expand the file and tail-recurse once per expansion. */
int tdb_store(struct tdb_context *tdb,
	      struct tdb_data key, struct tdb_data dbuf, int flag)
{
	tdb_off_t off, bucket, start, num;
	tdb_len_t old_room = 0;
	struct tdb_used_record rec;
	uint64_t h;
	int ret;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	/* Locks hash lists [start, start+num); off is the existing record's
	 * offset, or 0 if the key is absent. */
	off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
	if (unlikely(off == TDB_OFF_ERR))
		return -1;

	/* Now we have lock on this hash bucket. */
	if (flag == TDB_INSERT) {
		if (off) {
			tdb->ecode = TDB_ERR_EXISTS;
			goto fail;
		}
	} else {
		if (off) {
			/* Total space available in the existing record. */
			old_room = rec_data_length(&rec)
				+ rec_extra_padding(&rec);
			if (old_room >= dbuf.dsize) {
				/* Can modify in-place.  Easy! */
				if (update_rec_hdr(tdb, off,
						   key.dsize, dbuf.dsize,
						   &rec, h))
					goto fail;
				if (tdb->methods->write(tdb, off + sizeof(rec)
							+ key.dsize,
							dbuf.dptr, dbuf.dsize))
					goto fail;
				unlock_lists(tdb, start, num, F_WRLCK);
				return 0;
			}
			/* FIXME: See if right record is free? */
		} else {
			if (flag == TDB_MODIFY) {
				/* if the record doesn't exist and we
				   are in TDB_MODIFY mode then we should fail
				   the store */
				tdb->ecode = TDB_ERR_NOEXIST;
				goto fail;
			}
		}
	}

	/* If we didn't use the old record, this implies we're growing. */
	/* NOTE(review): when off == 0, rec_zone_bits(&rec) reads a rec that
	 * this path never filled in; harmless as replace_data() only uses
	 * the zone when old_off != 0 — but confirm find_and_lock()
	 * initializes rec in the not-found case. */
	ret = replace_data(tdb, h, key, dbuf, bucket, off, old_room,
			   rec_zone_bits(&rec), off != 0);
	unlock_lists(tdb, start, num, F_WRLCK);

	if (unlikely(ret == 1)) {
		/* Expand, then try again... */
		if (tdb_expand(tdb, key.dsize, dbuf.dsize, off != 0) == -1)
			return -1;
		return tdb_store(tdb, key, dbuf, flag);
	}

	/* FIXME: by simple simulation, this approximated 60% full.
	 * Check in real case! */
	if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
		enlarge_hash(tdb);

	return ret;

fail:
	unlock_lists(tdb, start, num, F_WRLCK);
	return -1;
}
870
/* Append dbuf to the data of an existing record, or create the record if
 * the key is absent.  Returns 0 on success, -1 on error with tdb->ecode
 * set.  May expand the file and tail-recurse once per expansion. */
int tdb_append(struct tdb_context *tdb,
	       struct tdb_data key, struct tdb_data dbuf)
{
	tdb_off_t off, bucket, start, num;
	struct tdb_used_record rec;
	tdb_len_t old_room = 0, old_dlen;
	unsigned char *newdata;
	struct tdb_data new_dbuf;
	uint64_t h;
	int ret;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
	if (unlikely(off == TDB_OFF_ERR))
		return -1;

	if (off) {
		old_dlen = rec_data_length(&rec);
		old_room = old_dlen + rec_extra_padding(&rec);

		/* Fast path: can append in place. */
		if (rec_extra_padding(&rec) >= dbuf.dsize) {
			if (update_rec_hdr(tdb, off, key.dsize,
					   old_dlen + dbuf.dsize, &rec, h))
				goto fail;

			/* Write only the new bytes, after the old data. */
			off += sizeof(rec) + key.dsize + old_dlen;
			if (tdb->methods->write(tdb, off, dbuf.dptr,
						dbuf.dsize) == -1)
				goto fail;

			/* FIXME: tdb_increment_seqnum(tdb); */
			unlock_lists(tdb, start, num, F_WRLCK);
			return 0;
		}
		/* FIXME: Check right record free? */

		/* Slow path: concatenate old data + dbuf in memory, then
		 * store that as a replacement record.
		 * NOTE(review): only old_dlen + dbuf.dsize bytes of this
		 * buffer are used; the extra key.dsize looks like slack —
		 * confirm whether it is intentional. */
		newdata = malloc(key.dsize + old_dlen + dbuf.dsize);
		if (!newdata) {
			tdb->ecode = TDB_ERR_OOM;
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "tdb_append: cannot allocate %llu bytes!\n",
				 (long long)key.dsize + old_dlen + dbuf.dsize);
			goto fail;
		}
		if (tdb->methods->read(tdb, off + sizeof(rec) + key.dsize,
				       newdata, old_dlen) != 0) {
			free(newdata);
			goto fail;
		}
		memcpy(newdata + old_dlen, dbuf.dptr, dbuf.dsize);
		new_dbuf.dptr = newdata;
		new_dbuf.dsize = old_dlen + dbuf.dsize;
	} else {
		newdata = NULL;
		new_dbuf = dbuf;
	}

	/* If they're using tdb_append(), it implies they're growing record. */
	ret = replace_data(tdb, h, key, new_dbuf, bucket, off, old_room,
			   rec_zone_bits(&rec), true);
	unlock_lists(tdb, start, num, F_WRLCK);
	/* free(NULL) is a no-op on the not-found path. */
	free(newdata);

	if (unlikely(ret == 1)) {
		/* Expand, then try again. */
		if (tdb_expand(tdb, key.dsize, dbuf.dsize, true) == -1)
			return -1;
		return tdb_append(tdb, key, dbuf);
	}

	/* FIXME: by simple simulation, this approximated 60% full.
	 * Check in real case! */
	if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
		enlarge_hash(tdb);

	return ret;

fail:
	unlock_lists(tdb, start, num, F_WRLCK);
	return -1;
}
954
955 struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
956 {
957         tdb_off_t off, start, num, bucket;
958         struct tdb_used_record rec;
959         uint64_t h;
960         struct tdb_data ret;
961
962         h = tdb_hash(tdb, key.dptr, key.dsize);
963         off = find_and_lock(tdb, key, h, F_RDLCK, &start, &num, &bucket, &rec);
964         if (unlikely(off == TDB_OFF_ERR))
965                 return tdb_null;
966
967         if (!off) {
968                 tdb->ecode = TDB_ERR_NOEXIST;
969                 ret = tdb_null;
970         } else {
971                 ret.dsize = rec_data_length(&rec);
972                 ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
973                                           ret.dsize);
974         }
975
976         unlock_lists(tdb, start, num, F_RDLCK);
977         return ret;
978 }
979
/* Delete the record matching key.  Returns 0 on success, -1 on error
 * with tdb->ecode set (TDB_ERR_NOEXIST if the key is absent). */
int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
{
	tdb_off_t i, bucket, off, start, num;
	struct tdb_used_record rec;
	uint64_t h;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
	if (unlikely(start == TDB_OFF_ERR))
		return -1;

	/* FIXME: Fastpath: if next is zero, we can delete without lock,
	 * since this lock protects us. */
	/* NOTE(review): on failure here we return without unlocking the list
	 * locked above — confirm find_and_lock_slow() releases it on error,
	 * otherwise the lock leaks. */
	off = find_and_lock_slow(tdb, key, h, F_WRLCK,
				 &start, &num, &bucket, &rec);
	if (unlikely(off == TDB_OFF_ERR))
		return -1;

	if (!off) {
		/* FIXME: We could optimize not found case if it mattered, by
		 * reading offset after first lock: if it's zero, goto here. */
		unlock_lists(tdb, start, num, F_WRLCK);
		tdb->ecode = TDB_ERR_NOEXIST;
		return -1;
	}
	/* Since we found the entry, we must have locked it and a zero. */
	assert(num >= 2);

	/* This actually unlinks it. */
	if (tdb_write_off(tdb, hash_off(tdb, bucket), 0) == -1)
		goto unlock_err;

	/* Rehash anything following, so linear-probe lookups still find it.
	 * NOTE(review): bound is bucket + num - 1 although the locked span
	 * is [start, start+num); this only stays inside locked lists because
	 * the loop breaks at the first empty bucket below — confirm. */
	for (i = bucket+1; i != bucket + num - 1; i++) {
		tdb_off_t hoff, off2;
		uint64_t h2;

		hoff = hash_off(tdb, i);
		off2 = tdb_read_off(tdb, hoff);
		if (unlikely(off2 == TDB_OFF_ERR))
			goto unlock_err;

		/* This can happen if we raced. */
		if (unlikely(off2 == 0))
			break;

		/* Maybe use a bit to indicate it is in ideal place? */
		h2 = hash_record(tdb, off2);
		/* Is it happy where it is? */
		if (hash_off(tdb, h2) == hoff)
			continue;

		/* Remove it. */
		if (tdb_write_off(tdb, hoff, 0) == -1)
			goto unlock_err;

		/* Rehash it. */
		if (hash_add(tdb, h2, off2) == -1)
			goto unlock_err;
	}

	/* Free the deleted entry. */
	if (add_free_record(tdb, rec_zone_bits(&rec), off,
			    sizeof(struct tdb_used_record)
			    + rec_key_length(&rec)
			    + rec_data_length(&rec)
			    + rec_extra_padding(&rec)) != 0)
		goto unlock_err;

	unlock_lists(tdb, start, num, F_WRLCK);
	return 0;

unlock_err:
	unlock_lists(tdb, start, num, F_WRLCK);
	return -1;
}
1056
1057 int tdb_close(struct tdb_context *tdb)
1058 {
1059         struct tdb_context **i;
1060         int ret = 0;
1061
1062         /* FIXME:
1063         if (tdb->transaction) {
1064                 tdb_transaction_cancel(tdb);
1065         }
1066         */
1067         tdb_trace(tdb, "tdb_close");
1068
1069         if (tdb->map_ptr) {
1070                 if (tdb->flags & TDB_INTERNAL)
1071                         free(tdb->map_ptr);
1072                 else
1073                         tdb_munmap(tdb);
1074         }
1075         free((char *)tdb->name);
1076         if (tdb->fd != -1) {
1077                 ret = close(tdb->fd);
1078                 tdb->fd = -1;
1079         }
1080         free(tdb->lockrecs);
1081
1082         /* Remove from contexts list */
1083         for (i = &tdbs; *i; i = &(*i)->next) {
1084                 if (*i == tdb) {
1085                         *i = tdb->next;
1086                         break;
1087                 }
1088         }
1089
1090 #ifdef TDB_TRACE
1091         close(tdb->tracefd);
1092 #endif
1093         free(tdb);
1094
1095         return ret;
1096 }
1097
/* Return the error code recorded by the last failing operation on tdb. */
enum TDB_ERROR tdb_error(struct tdb_context *tdb)
{
	return tdb->ecode;
}