]> git.ozlabs.org Git - ccan/blob - ccan/tdb2/tdb.c
tdb2: clean up locking a little bit, fix hash wrap problem.
[ccan] / ccan / tdb2 / tdb.c
1 #include "private.h"
2 #include <ccan/tdb2/tdb2.h>
3 #include <ccan/hash/hash.h>
4 #include <ccan/build_assert/build_assert.h>
5 #include <ccan/likely/likely.h>
6 #include <assert.h>
7
/* The null return: what fetch-style functions hand back when there is
 * nothing to return (no record / error). */
struct tdb_data tdb_null = { .dptr = NULL, .dsize = 0 };

/* all contexts, to ensure no double-opens (fcntl locks don't nest!) */
static struct tdb_context *tdbs = NULL;
13
/* Default log function: silently discards all messages.  Installed by
 * tdb_open until a caller supplies a real logger. */
PRINTF_ATTRIBUTE(4, 5) static void
null_log_fn(struct tdb_context *tdb,
	    enum tdb_debug_level level, void *priv,
	    const char *fmt, ...)
{
}
20
21 /* We do a lot of work assuming our copy of the header volatile area
22  * is uptodate, and usually it is.  However, once we grab a lock, we have to
23  * re-check it. */
24 bool header_changed(struct tdb_context *tdb)
25 {
26         uint64_t gen;
27
28         if (!(tdb->flags & TDB_NOLOCK) && tdb->header_uptodate) {
29                 tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
30                          "warning: header uptodate already\n");
31         }
32
33         /* We could get a partial update if we're not holding any locks. */
34         assert((tdb->flags & TDB_NOLOCK) || tdb_has_locks(tdb));
35
36         tdb->header_uptodate = true;
37         gen = tdb_read_off(tdb, offsetof(struct tdb_header, v.generation));
38         if (unlikely(gen != tdb->header.v.generation)) {
39                 tdb_read_convert(tdb, offsetof(struct tdb_header, v),
40                                  &tdb->header.v, sizeof(tdb->header.v));
41                 return true;
42         }
43         return false;
44 }
45
46 int write_header(struct tdb_context *tdb)
47 {
48         assert(tdb_read_off(tdb, offsetof(struct tdb_header, v.generation))
49                == tdb->header.v.generation);
50         tdb->header.v.generation++;
51         return tdb_write_convert(tdb, offsetof(struct tdb_header, v),
52                                  &tdb->header.v, sizeof(tdb->header.v));
53 }
54
/* Default key-hash callback: delegates to ccan's hash64_stable.
 * The opaque arg is unused. */
static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
			     void *arg)
{
	const unsigned char *bytes = key;

	return hash64_stable(bytes, length, seed);
}
60
61 uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
62 {
63         return tdb->khash(ptr, len, tdb->header.hash_seed, tdb->hash_priv);
64 }
65
66 static bool tdb_already_open(dev_t device, ino_t ino)
67 {
68         struct tdb_context *i;
69         
70         for (i = tdbs; i; i = i->next) {
71                 if (i->device == device && i->inode == ino) {
72                         return true;
73                 }
74         }
75
76         return false;
77 }
78
/* Produce a 64-bit seed for the hash function.  Tries /dev/urandom,
 * then an EGD socket, then falls back to mixing pid and time of day.
 * Always returns something; tdb is used only for logging. */
static uint64_t random_number(struct tdb_context *tdb)
{
	int fd;
	uint64_t ret = 0;
	struct timeval now;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		if (tdb_read_all(fd, &ret, sizeof(ret))) {
			tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
				 "tdb_open: random from /dev/urandom\n");
			close(fd);
			return ret;
		}
		close(fd);
	}
	/* FIXME: Untested!  Based on Wikipedia protocol description! */
	fd = open("/dev/egd-pool", O_RDWR);
	if (fd >= 0) {
		/* Command is 1, next byte is size we want to read. */
		char cmd[2] = { 1, sizeof(uint64_t) };
		if (write(fd, cmd, sizeof(cmd)) == sizeof(cmd)) {
			char reply[1 + sizeof(uint64_t)];
			int r = read(fd, reply, sizeof(reply));
			if (r > 1) {
				tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
					 "tdb_open: %u random bytes from"
					 " /dev/egd-pool\n", r-1);
				/* Copy at least some bytes. */
				memcpy(&ret, reply+1, r - 1);
				/* Only a complete reply counts as success. */
				if (reply[0] == sizeof(uint64_t)
				    && r == sizeof(reply)) {
					close(fd);
					return ret;
				}
				/* NOTE(review): on a short reply the bytes
				 * copied above are thrown away — the fallback
				 * below overwrites ret.  Confirm whether the
				 * partial entropy was meant to be kept. */
			}
		}
		close(fd);
	}

	/* Fallback: pid and time. */
	gettimeofday(&now, NULL);
	ret = getpid() * 100132289ULL + now.tv_sec * 1000000ULL + now.tv_usec;
	tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
		 "tdb_open: random from getpid and time\n");
	return ret;
}
126
/* Complete image of a freshly-created database: header, then the hash
 * table (stored as one used record), then the free list (one used
 * record).  Member order matters: offsetof() of these members is stored
 * in the header by tdb_new_database(). */
struct new_database {
	struct tdb_header hdr;
	struct tdb_used_record hrec;
	tdb_off_t hash[1ULL << INITIAL_HASH_BITS];
	struct tdb_used_record frec;
	tdb_off_t free[INITIAL_FREE_BUCKETS + 1]; /* One overflow bucket */
};
134
/* Initialise a new database: build the entire initial file image in
 * memory (struct new_database above), then either keep it as the
 * in-memory map (TDB_INTERNAL) or truncate the file and write it out.
 * Returns 0 on success, -1 on failure (tdb->ecode set for OOM/IO). */
static int tdb_new_database(struct tdb_context *tdb)
{
	/* We make it up in memory, then write it out if not internal */
	struct new_database newdb;
	unsigned int magic_off = offsetof(struct tdb_header, magic_food);

	/* Fill in the header */
	newdb.hdr.version = TDB_VERSION;
	newdb.hdr.hash_seed = random_number(tdb);
	/* Store hash(TDB_HASH_MAGIC) so a later open can detect that a
	 * different hash function is in use (checked in tdb_open). */
	newdb.hdr.hash_test = TDB_HASH_MAGIC;
	newdb.hdr.hash_test = tdb->khash(&newdb.hdr.hash_test,
					 sizeof(newdb.hdr.hash_test),
					 newdb.hdr.hash_seed,
					 tdb->hash_priv);

	newdb.hdr.v.generation = 0;

	/* The initial zone must cover the initial database size! */
	BUILD_ASSERT((1ULL << INITIAL_ZONE_BITS) >= sizeof(newdb));

	/* Free array has 1 zone, 10 buckets.  All buckets empty. */
	newdb.hdr.v.num_zones = 1;
	newdb.hdr.v.zone_bits = INITIAL_ZONE_BITS;
	newdb.hdr.v.free_buckets = INITIAL_FREE_BUCKETS;
	newdb.hdr.v.free_off = offsetof(struct new_database, free);
	set_header(tdb, &newdb.frec, 0,
		   sizeof(newdb.free), sizeof(newdb.free), 0);
	memset(newdb.free, 0, sizeof(newdb.free));

	/* Initial hashes are empty. */
	newdb.hdr.v.hash_bits = INITIAL_HASH_BITS;
	newdb.hdr.v.hash_off = offsetof(struct new_database, hash);
	set_header(tdb, &newdb.hrec, 0,
		   sizeof(newdb.hash), sizeof(newdb.hash), 0);
	memset(newdb.hash, 0, sizeof(newdb.hash));

	/* Magic food */
	memset(newdb.hdr.magic_food, 0, sizeof(newdb.hdr.magic_food));
	strcpy(newdb.hdr.magic_food, TDB_MAGIC_FOOD);

	/* This creates an endian-converted database, as if read from disk;
	 * note the magic string itself is excluded from conversion. */
	tdb_convert(tdb,
		    (char *)&newdb.hdr + magic_off,
		    sizeof(newdb) - magic_off);

	/* Keep a copy of the header in the context. */
	tdb->header = newdb.hdr;

	if (tdb->flags & TDB_INTERNAL) {
		/* In-memory database: the image *is* the map. */
		tdb->map_size = sizeof(newdb);
		tdb->map_ptr = malloc(tdb->map_size);
		if (!tdb->map_ptr) {
			tdb->ecode = TDB_ERR_OOM;
			return -1;
		}
		memcpy(tdb->map_ptr, &newdb, tdb->map_size);
		return 0;
	}
	if (lseek(tdb->fd, 0, SEEK_SET) == -1)
		return -1;

	/* Discard any previous contents before writing the fresh image. */
	if (ftruncate(tdb->fd, 0) == -1)
		return -1;

	if (!tdb_pwrite_all(tdb->fd, &newdb, sizeof(newdb), 0)) {
		tdb->ecode = TDB_ERR_IO;
		return -1;
	}
	return 0;
}
205
/* Open (and if necessary create) a tdb database.
 *
 * name:       filesystem path (unused for content with TDB_INTERNAL).
 * tdb_flags:  TDB_* flags (TDB_INTERNAL, TDB_NOLOCK, TDB_NOMMAP, ...).
 * open_flags: open(2)-style flags; O_WRONLY is rejected, O_RDONLY
 *             forces TDB_NOLOCK, O_CREAT allows creating a new file.
 * mode:       permissions if a new file is created.
 * attr:       attributes — not yet supported, must be NULL.
 *
 * Returns the new context, or NULL with errno set. */
struct tdb_context *tdb_open(const char *name, int tdb_flags,
			     int open_flags, mode_t mode,
			     union tdb_attribute *attr)
{
	struct tdb_context *tdb;
	struct stat st;
	int save_errno;
	uint64_t hash_test;
	unsigned v;

	tdb = malloc(sizeof(*tdb));
	if (!tdb) {
		/* Can't log this */
		errno = ENOMEM;
		goto fail;
	}
	/* Initialize enough state that the fail: path below is always safe. */
	tdb->name = NULL;
	tdb->map_ptr = NULL;
	tdb->fd = -1;
	/* map_size will be set below. */
	tdb->ecode = TDB_SUCCESS;
	/* header will be read in below. */
	tdb->header_uptodate = false;
	tdb->flags = tdb_flags;
	tdb->log = null_log_fn;
	tdb->log_priv = NULL;
	tdb->khash = jenkins_hash;
	tdb->hash_priv = NULL;
	tdb->transaction = NULL;
	/* last_zone will be set below. */
	tdb_io_init(tdb);
	tdb_lock_init(tdb);

	/* FIXME */
	if (attr) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: attributes not yet supported\n");
		errno = EINVAL;
		goto fail;
	}

	if ((open_flags & O_ACCMODE) == O_WRONLY) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: can't open tdb %s write-only\n", name);
		errno = EINVAL;
		goto fail;
	}

	if ((open_flags & O_ACCMODE) == O_RDONLY) {
		tdb->read_only = true;
		/* read only databases don't do locking */
		tdb->flags |= TDB_NOLOCK;
	} else
		tdb->read_only = false;

	/* internal databases don't need any of the rest. */
	if (tdb->flags & TDB_INTERNAL) {
		tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
		if (tdb_new_database(tdb) != 0) {
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: tdb_new_database failed!");
			goto fail;
		}
		TEST_IT(tdb->flags & TDB_CONVERT);
		tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
		return tdb;
	}

	if ((tdb->fd = open(name, open_flags, mode)) == -1) {
		tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
			 "tdb_open: could not open file %s: %s\n",
			 name, strerror(errno));
		goto fail;	/* errno set by open(2) */
	}

	/* on exec, don't inherit the fd */
	/* NOTE(review): fcntl return values are unchecked here. */
	v = fcntl(tdb->fd, F_GETFD, 0);
	fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC);

	/* ensure there is only one process initialising at once */
	if (tdb_lock_open(tdb) == -1) {
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: failed to get open lock on %s: %s\n",
			 name, strerror(errno));
		goto fail;	/* errno set by tdb_brlock */
	}

	/* No/invalid header: create a fresh database if O_CREAT given. */
	if (!tdb_pread_all(tdb->fd, &tdb->header, sizeof(tdb->header), 0)
	    || strcmp(tdb->header.magic_food, TDB_MAGIC_FOOD) != 0) {
		if (!(open_flags & O_CREAT) || tdb_new_database(tdb) == -1) {
			if (errno == 0) {
				errno = EIO; /* ie bad format or something */
			}
			goto fail;
		}
	} else if (tdb->header.version != TDB_VERSION) {
		/* A byte-swapped version means an endian-swapped file. */
		if (tdb->header.version == bswap_64(TDB_VERSION))
			tdb->flags |= TDB_CONVERT;
		else {
			/* wrong version */
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: %s is unknown version 0x%llx\n",
				 name, (long long)tdb->header.version);
			errno = EIO;
			goto fail;
		}
	}

	tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
	/* Re-derive hash(TDB_HASH_MAGIC) and compare with the stored value:
	 * catches an open with a different hash function. */
	hash_test = TDB_HASH_MAGIC;
	hash_test = tdb->khash(&hash_test, sizeof(hash_test),
			       tdb->header.hash_seed, tdb->hash_priv);
	if (tdb->header.hash_test != hash_test) {
		/* wrong hash variant */
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: %s uses a different hash function\n",
			 name);
		errno = EIO;
		goto fail;
	}

	if (fstat(tdb->fd, &st) == -1)
		goto fail;

	/* Is it already in the open list?  If so, fail. */
	if (tdb_already_open(st.st_dev, st.st_ino)) {
		/* FIXME */
		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
			 "tdb_open: %s (%d,%d) is already open in this process\n",
			 name, (int)st.st_dev, (int)st.st_ino);
		errno = EBUSY;
		goto fail;
	}

	tdb->name = strdup(name);
	if (!tdb->name) {
		errno = ENOMEM;
		goto fail;
	}

	tdb->map_size = st.st_size;
	tdb->device = st.st_dev;
	tdb->inode = st.st_ino;
	tdb_mmap(tdb);
	tdb_unlock_open(tdb);
	tdb_zone_init(tdb);

	/* Link into the global list of open databases. */
	tdb->next = tdbs;
	tdbs = tdb;
	return tdb;

 fail:
	/* Preserve errno across the cleanup calls below. */
	save_errno = errno;

	if (!tdb)
		return NULL;

#ifdef TDB_TRACE
	/* NOTE(review): tracefd is not initialized anywhere in this
	 * function — verify it is set before any goto fail when tracing. */
	close(tdb->tracefd);
#endif
	if (tdb->map_ptr) {
		if (tdb->flags & TDB_INTERNAL) {
			free(tdb->map_ptr);
		} else
			tdb_munmap(tdb);
	}
	free((char *)tdb->name);
	if (tdb->fd != -1)
		if (close(tdb->fd) != 0)
			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
				 "tdb_open: failed to close tdb->fd"
				 " on error!\n");
	free(tdb);
	errno = save_errno;
	return NULL;
}
382
383 static tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
384 {
385         return tdb->header.v.hash_off
386                 + ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
387                    * sizeof(tdb_off_t));
388 }
389
/* Returns 0 if the entry is a zero (definitely not a match).
 * Returns a valid entry offset if it's a match.  Fills in rec.
 * Otherwise returns TDB_OFF_ERR: keep searching. */
static tdb_off_t entry_matches(struct tdb_context *tdb,
			       uint64_t list,
			       uint64_t hash,
			       const struct tdb_data *key,
			       struct tdb_used_record *rec)
{
	tdb_off_t off;
	uint64_t keylen;
	const unsigned char *rkey;

	/* NOTE(review): reads the bucket without hash_off()'s masking, so
	 * callers must pass an in-range list index. */
	off = tdb_read_off(tdb, tdb->header.v.hash_off
			   + list * sizeof(tdb_off_t));
	if (off == 0 || off == TDB_OFF_ERR)
		return off;

#if 0 /* FIXME: Check other bits. */
	unsigned int bits, bitmask, hoffextra;
	/* Bottom three bits show how many extra hash bits. */
	bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1;
	bitmask = (1 << bits)-1;
	hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask);
	uint64_t hextra = hash >> tdb->header.v.hash_bits;
	if ((hextra & bitmask) != hoffextra) 
		return TDB_OFF_ERR;
	off &= ~...;
#endif

	/* Read the record header so we can check the stored key length. */
	if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
		return TDB_OFF_ERR;

	/* FIXME: check extra bits in header! */
	keylen = rec_key_length(rec);
	if (keylen != key->dsize)
		return TDB_OFF_ERR;

	/* Lengths match: compare the actual key bytes. */
	rkey = tdb_access_read(tdb, off + sizeof(*rec), keylen);
	if (!rkey)
		return TDB_OFF_ERR;
	if (memcmp(rkey, key->dptr, keylen) != 0)
		off = TDB_OFF_ERR;
	tdb_access_release(tdb, rkey);
	return off;
}
436
437 /* FIXME: Optimize? */
438 static void unlock_range(struct tdb_context *tdb,
439                          tdb_off_t list, tdb_len_t num,
440                          int ltype)
441 {
442         tdb_off_t i;
443
444         for (i = list; i < list + num; i++)
445                 tdb_unlock_list(tdb, i, ltype);
446 }
447
448 /* FIXME: Optimize? */
449 static int lock_range(struct tdb_context *tdb,
450                       tdb_off_t list, tdb_len_t num,
451                       int ltype)
452 {
453         tdb_off_t i;
454
455         for (i = list; i < list + num; i++) {
456                 if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT) != 0) {
457                         unlock_range(tdb, list, i - list, ltype);
458                         return -1;
459                 }
460         }
461         return 0;
462 }
463
/* We lock hashes up to the next empty offset.  We already hold the
 * lock on the start bucket, but we may need to release and re-grab
 * it.  If we fail, we hold no locks at all!
 * Returns the number of locked buckets (possibly wrapping through the
 * table start), or TDB_OFF_ERR on failure. */
static tdb_len_t relock_hash_to_zero(struct tdb_context *tdb,
				     tdb_off_t start, int ltype)
{
	tdb_len_t num, len, pre_locks;

again:
	num = 1ULL << tdb->header.v.hash_bits;
	/* Distance from start to the first empty bucket (== num - start
	 * means no empty bucket before the end of the table). */
	len = tdb_find_zero_off(tdb, hash_off(tdb, start), num - start);
	if (unlikely(len == num - start)) {
		/* We hit the end of the hash range.  Drop lock: we have
		   to lock start of hash first. */
		tdb_unlock_list(tdb, start, ltype);
		/* Grab something, so header is stable. */
		if (tdb_lock_list(tdb, 0, ltype, TDB_LOCK_WAIT))
			return TDB_OFF_ERR;
		len = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
		if (lock_range(tdb, 1, len, ltype) == -1) {
			tdb_unlock_list(tdb, 0, ltype);
			return TDB_OFF_ERR;
		}
		pre_locks = len;
		len = num - start;
	} else {
		/* We already have lock on start. */
		start++;
		pre_locks = 0;
	}
	if (unlikely(lock_range(tdb, start, len, ltype) == -1)) {
		/* Failed: drop everything, including the pre-wrap locks. */
		if (pre_locks)
			unlock_range(tdb, 0, pre_locks, ltype);
		else
			tdb_unlock_list(tdb, start, ltype);
		return TDB_OFF_ERR;
	}

	/* Now, did we lose the race, and it's not zero any more? */
	/* NOTE(review): the bucket index pre_locks + len here mixes a
	 * count with a distance from start — verify this addresses the
	 * empty bucket in both the wrapped and non-wrapped cases. */
	if (unlikely(tdb_read_off(tdb, hash_off(tdb, pre_locks + len)) != 0)) {
		unlock_range(tdb, 0, pre_locks, ltype);
		/* Leave the start locked, as expected. */
		unlock_range(tdb, start + 1, len - 1, ltype);
		goto again;
	}

	return pre_locks + len;
}
512
513 /* FIXME: modify, don't rewrite! */
514 static int update_rec_hdr(struct tdb_context *tdb,
515                           tdb_off_t off,
516                           tdb_len_t keylen,
517                           tdb_len_t datalen,
518                           struct tdb_used_record *rec,
519                           uint64_t h)
520 {
521         uint64_t room = rec_data_length(rec) + rec_extra_padding(rec);
522
523         if (set_header(tdb, rec, keylen, datalen, room - datalen, h))
524                 return -1;
525
526         return tdb_write_convert(tdb, off, rec, sizeof(*rec));
527 }
528
529 /* If we fail, others will try after us. */
530 static void enlarge_hash(struct tdb_context *tdb)
531 {
532         tdb_off_t newoff, i;
533         uint64_t h, num = 1ULL << tdb->header.v.hash_bits;
534         struct tdb_used_record pad, *r;
535
536         /* FIXME: We should do this without holding locks throughout. */
537         if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
538                 return;
539
540         /* Someone else enlarged for us?  Nothing to do. */
541         if ((1ULL << tdb->header.v.hash_bits) != num)
542                 goto unlock;
543
544         newoff = alloc(tdb, 0, num * 2, 0, false);
545         if (unlikely(newoff == TDB_OFF_ERR))
546                 goto unlock;
547         if (unlikely(newoff == 0)) {
548                 if (tdb_expand(tdb, 0, num * 2, false) == -1)
549                         goto unlock;
550                 newoff = alloc(tdb, 0, num * 2, 0, false);
551                 if (newoff == TDB_OFF_ERR || newoff == 0)
552                         goto unlock;
553         }
554
555         /* FIXME: If the space before is empty, we know this is in its ideal
556          * location.  We can steal a bit from the pointer to avoid rehash. */
557         for (i = tdb_find_nonzero_off(tdb, tdb->header.v.hash_off, num);
558              i < num;
559              i += tdb_find_nonzero_off(tdb, tdb->header.v.hash_off
560                                        + i*sizeof(tdb_off_t), num - i)) {
561                 tdb_off_t off;
562                 off = tdb_read_off(tdb, tdb->header.v.hash_off
563                                    + i*sizeof(tdb_off_t));
564                 if (unlikely(off == TDB_OFF_ERR))
565                         goto unlock;
566                 if (unlikely(!off)) {
567                         tdb->ecode = TDB_ERR_CORRUPT;
568                         tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
569                                  "find_bucket_and_lock: zero hash bucket!\n");
570                         goto unlock;
571                 }
572                 h = hash_record(tdb, off);
573                 /* FIXME: Encode extra hash bits! */
574                 if (tdb_write_off(tdb, newoff
575                                   + (h & ((num * 2) - 1)) * sizeof(uint64_t),
576                                   off) == -1)
577                         goto unlock;
578         }
579
580         /* Free up old hash. */
581         r = tdb_get(tdb, tdb->header.v.hash_off, &pad, sizeof(*r));
582         if (!r)
583                 goto unlock;
584         add_free_record(tdb, tdb->header.v.hash_off,
585                         rec_data_length(r) + rec_extra_padding(r));
586
587         /* Now we write the modified header. */
588         tdb->header.v.generation++;
589         tdb->header.v.hash_bits++;
590         tdb->header.v.hash_off = newoff;
591         tdb_write_convert(tdb, offsetof(struct tdb_header, v),
592                           &tdb->header.v, sizeof(tdb->header.v));
593 unlock:
594         tdb_allrecord_unlock(tdb, F_WRLCK);
595 }
596
/* Store a key/data pair.
 * flag is TDB_INSERT (fail if key exists), TDB_MODIFY (fail if it
 * doesn't), or replace-style otherwise.
 * Returns 0 on success, -1 on error with tdb->ecode set. */
int tdb_store(struct tdb_context *tdb,
	      struct tdb_data key, struct tdb_data dbuf, int flag)
{
	tdb_off_t new_off, off, old_bucket, start, num_locks = 1;
	struct tdb_used_record rec;
	uint64_t h;
	bool growing = false;

	h = tdb_hash(tdb, key.dptr, key.dsize);

	/* FIXME: can we avoid locks for some fast paths? */
	start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
	if (start == TDB_OFF_ERR)
		return -1;

	/* Fast path: the ideal bucket is empty, or holds our key. */
	old_bucket = start;
	off = entry_matches(tdb, start, h, &key, &rec);
	if (unlikely(off == TDB_OFF_ERR)) {
		/* Slow path, need to grab more locks and search. */
		tdb_off_t i;

		/* Warning: this may drop the lock!  Does that on error. */
		num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
		if (num_locks == TDB_OFF_ERR)
			return -1;

		/* Scan every locked bucket for the key. */
		for (i = start; i < start + num_locks; i++) {
			off = entry_matches(tdb, i, h, &key, &rec);
			/* Empty entry or we found it? */
			if (off == 0 || off != TDB_OFF_ERR) {
				old_bucket = i;
				break;
			}
		}
		if (i == start + num_locks)
			off = 0;
	}

	/* Now we have lock on this hash bucket. */
	if (flag == TDB_INSERT) {
		if (off) {
			tdb->ecode = TDB_ERR_EXISTS;
			goto fail;
		}
	} else {
		if (off) {
			/* Enough room to overwrite the old record in place? */
			if (rec_data_length(&rec) + rec_extra_padding(&rec)
			    >= dbuf.dsize) {
				new_off = off;
				if (update_rec_hdr(tdb, off,
						   key.dsize, dbuf.dsize,
						   &rec, h))
					goto fail;
				goto write;
			}
			/* FIXME: See if right record is free? */
			/* Hint to allocator that we've realloced. */
			growing = true;
		} else {
			if (flag == TDB_MODIFY) {
				/* if the record doesn't exist and we
				   are in TDB_MODIFY mode then we should fail
				   the store */
				tdb->ecode = TDB_ERR_NOEXIST;
				goto fail;
			}
		}
	}

	/* Allocate a new record. */
	new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
	if (new_off == 0) {
		/* No space: drop locks, grow the file, and retry from
		 * scratch (the hash may have moved meanwhile). */
		unlock_range(tdb, start, num_locks, F_WRLCK);
		/* Expand, then try again... */
		if (tdb_expand(tdb, key.dsize, dbuf.dsize, growing) == -1)
			return -1;
		return tdb_store(tdb, key, dbuf, flag);
	}

	/* We didn't like the existing one: remove it. */
	if (off) {
		add_free_record(tdb, off, sizeof(struct tdb_used_record)
				+ rec_key_length(&rec)
				+ rec_data_length(&rec)
				+ rec_extra_padding(&rec));
	}

write:
	/* Point the bucket at the record, then write key and data after
	 * the record header. */
	/* FIXME: Encode extra hash bits! */
	if (tdb_write_off(tdb, hash_off(tdb, old_bucket), new_off) == -1)
		goto fail;

	off = new_off + sizeof(struct tdb_used_record);
	if (tdb->methods->write(tdb, off, key.dptr, key.dsize) == -1)
		goto fail;
	off += key.dsize;
	if (tdb->methods->write(tdb, off, dbuf.dptr, dbuf.dsize) == -1)
		goto fail;

	/* FIXME: tdb_increment_seqnum(tdb); */
	unlock_range(tdb, start, num_locks, F_WRLCK);

	/* A long locked run suggests the table is getting full.
	 * FIXME: by simple simulation, this approximated 60% full.
	 * Check in real case! */
	if (unlikely(num_locks > 4 * tdb->header.v.hash_bits - 31))
		enlarge_hash(tdb);

	return 0;

fail:
	unlock_range(tdb, start, num_locks, F_WRLCK);
	return -1;
}
711
/* Fetch the data stored under a key.  Returns tdb_null (dptr == NULL)
 * if the key is absent or an error occurred; on success the caller
 * owns (and must free) the returned dptr. */
struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
{
	tdb_off_t off, start, num_locks = 1;
	struct tdb_used_record rec;
	uint64_t h;
	struct tdb_data ret;

	h = tdb_hash(tdb, key.dptr, key.dsize);

	/* FIXME: can we avoid locks for some fast paths? */
	start = tdb_lock_list(tdb, h, F_RDLCK, TDB_LOCK_WAIT);
	if (start == TDB_OFF_ERR)
		return tdb_null;

	/* Fast path: the ideal bucket is empty, or holds our key. */
	off = entry_matches(tdb, start, h, &key, &rec);
	if (unlikely(off == TDB_OFF_ERR)) {
		/* Slow path, need to grab more locks and search. */
		tdb_off_t i;

		/* Warning: this may drop the lock!  Does that on error. */
		num_locks = relock_hash_to_zero(tdb, start, F_RDLCK);
		if (num_locks == TDB_OFF_ERR)
			return tdb_null;

		/* Scan every locked bucket for the key. */
		for (i = start; i < start + num_locks; i++) {
			off = entry_matches(tdb, i, h, &key, &rec);
			/* Empty entry or we found it? */
			if (off == 0 || off != TDB_OFF_ERR)
				break;
		}
		if (i == start + num_locks)
			off = 0;
	}

	if (!off) {
		unlock_range(tdb, start, num_locks, F_RDLCK);
		tdb->ecode = TDB_ERR_NOEXIST;
		return tdb_null;
	}

	/* Data follows the record header and the key. */
	ret.dsize = rec_data_length(&rec);
	/* NOTE(review): if tdb_alloc_read fails, dptr is presumably NULL
	 * while dsize stays set — callers should test dptr, not dsize. */
	ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
				  ret.dsize);
	unlock_range(tdb, start, num_locks, F_RDLCK);
	return ret;
}
759
760 static int hash_add(struct tdb_context *tdb, uint64_t h, tdb_off_t off)
761 {
762         tdb_off_t i, hoff, len, num;
763
764         /* Look for next space. */
765         i = (h & ((1ULL << tdb->header.v.hash_bits) - 1));
766         len = (1ULL << tdb->header.v.hash_bits) - i;
767         num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);
768
769         if (unlikely(num == len)) {
770                 /* We wrapped.  Look through start of hash table. */
771                 hoff = hash_off(tdb, 0);
772                 len = (1ULL << tdb->header.v.hash_bits);
773                 num = tdb_find_zero_off(tdb, hoff, len);
774                 if (i == len) {
775                         tdb->ecode = TDB_ERR_CORRUPT;
776                         tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
777                                  "hash_add: full hash table!\n");
778                         return -1;
779                 }
780         }
781         /* FIXME: Encode extra hash bits! */
782         return tdb_write_off(tdb, hash_off(tdb, i + num), off);
783 }
784
/* Delete the record matching @key from the database.
 *
 * Returns 0 on success; -1 with tdb->ecode set on error, including
 * TDB_ERR_NOEXIST when the key is not present.
 *
 * Locking: write-locks the key's home hash chain; on the slow path the
 * lock run is extended forward to the next empty bucket.  All exit
 * paths (after the initial lock succeeds) release the locked range,
 * except where relock_hash_to_zero() has already dropped it on error.
 */
int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
{
	tdb_off_t i, old_bucket, off, start, num_locks = 1;
	struct tdb_used_record rec;
	uint64_t h;

	h = tdb_hash(tdb, key.dptr, key.dsize);

	/* FIXME: can we avoid locks for some fast paths? */
	start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
	if (start == TDB_OFF_ERR)
		return -1;

	/* Fast path: key found directly in its home bucket. */
	old_bucket = start;
	off = entry_matches(tdb, start, h, &key, &rec);
	if (off && off != TDB_OFF_ERR) {
		/* We can only really fastpath delete if next bucket
		 * is 0.  Note that we haven't locked it, but our lock
		 * on this bucket stops anyone overflowing into it
		 * while we look. */
		if (tdb_read_off(tdb, hash_off(tdb, h+1)) == 0)
			goto delete;
		/* Slow path. */
		off = TDB_OFF_ERR;
	}

	if (unlikely(off == TDB_OFF_ERR)) {
		/* Slow path, need to grab more locks and search. */
		tdb_off_t i;

		/* Warning: this may drop the lock!  Does that on error. */
		num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
		if (num_locks == TDB_OFF_ERR)
			return -1;

		/* Scan every bucket in the locked run for the key. */
		for (i = start; i < start + num_locks; i++) {
			off = entry_matches(tdb, i, h, &key, &rec);
			/* Empty entry or we found it? */
			if (off == 0 || off != TDB_OFF_ERR) {
				old_bucket = i;
				break;
			}
		}
		/* Ran off the end of the locked run without a match. */
		if (i == start + num_locks)
			off = 0;
	}

	if (!off) {
		unlock_range(tdb, start, num_locks, F_WRLCK);
		tdb->ecode = TDB_ERR_NOEXIST;
		return -1;
	}

delete:
	/* This actually unlinks it. */
	if (tdb_write_off(tdb, hash_off(tdb, old_bucket), 0) == -1)
		goto unlock_err;

	/* Rehash anything following: emptying a bucket can break the
	 * probe chain for records that were displaced past it.
	 * NOTE(review): the loop bound mixes h (full hash) with
	 * num_locks (counted from start); presumably hash_off() masks
	 * both to the same bucket range — confirm, and confirm that
	 * wraparound past the table end cannot occur here. */
	for (i = old_bucket+1; i < h + num_locks; i++) {
		tdb_off_t off2;
		uint64_t h2;

		off2 = tdb_read_off(tdb, hash_off(tdb, i));
		if (unlikely(off2 == TDB_OFF_ERR))
			goto unlock_err;

		/* Maybe use a bit to indicate it is in ideal place? */
		h2 = hash_record(tdb, off2);
		/* Is it happy where it is? */
		if ((h2 & ((1ULL << tdb->header.v.hash_bits)-1))
		    == (i & ((1ULL << tdb->header.v.hash_bits)-1)))
			continue;

		/* Remove it. */
		if (tdb_write_off(tdb, hash_off(tdb, i), 0) == -1)
			goto unlock_err;

		/* Rehash it. */
		if (hash_add(tdb, h2, off2) == -1)
			goto unlock_err;
	}

	/* Free the deleted entry. */
	if (add_free_record(tdb, off,
			    sizeof(struct tdb_used_record)
			    + rec_key_length(&rec)
			    + rec_data_length(&rec)
			    + rec_extra_padding(&rec)) != 0)
		goto unlock_err;

	unlock_range(tdb, start, num_locks, F_WRLCK);
	return 0;

unlock_err:
	unlock_range(tdb, start, num_locks, F_WRLCK);
	return -1;
}
884
885 int tdb_close(struct tdb_context *tdb)
886 {
887         struct tdb_context **i;
888         int ret = 0;
889
890         /* FIXME:
891         if (tdb->transaction) {
892                 tdb_transaction_cancel(tdb);
893         }
894         */
895         tdb_trace(tdb, "tdb_close");
896
897         if (tdb->map_ptr) {
898                 if (tdb->flags & TDB_INTERNAL)
899                         free(tdb->map_ptr);
900                 else
901                         tdb_munmap(tdb);
902         }
903         free((char *)tdb->name);
904         if (tdb->fd != -1) {
905                 ret = close(tdb->fd);
906                 tdb->fd = -1;
907         }
908         free(tdb->lockrecs);
909
910         /* Remove from contexts list */
911         for (i = &tdbs; *i; i = &(*i)->next) {
912                 if (*i == tdb) {
913                         *i = tdb->next;
914                         break;
915                 }
916         }
917
918 #ifdef TDB_TRACE
919         close(tdb->tracefd);
920 #endif
921         free(tdb);
922
923         return ret;
924 }
925
926 enum TDB_ERROR tdb_error(struct tdb_context *tdb)
927 {
928         return tdb->ecode;
929 }