ccan/tdb2/lock.c (tdb2: initial commit; doesn't work, still writing tests)
/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell              1999-2005
   Copyright (C) Paul `Rusty' Russell              2000
   Copyright (C) Jeremy Allison                    2000-2003

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "private.h"

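/* Thin wrappers around fcntl() byte-range locks: F_SETLKW blocks until the
 * lock can be granted, while F_SETLK fails immediately (EAGAIN/EACCES) if it
 * cannot.  All the higher-level tdb locks below are built on these. */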
static int fcntl_lock(struct tdb_context *tdb,
                      int rw, off_t off, off_t len, bool waitflag)
{
        struct flock fl;

        fl.l_type = rw;
        fl.l_whence = SEEK_SET;
        fl.l_start = off;
        fl.l_len = len;
        fl.l_pid = 0;

        if (waitflag)
                return fcntl(tdb->fd, F_SETLKW, &fl);
        else
                return fcntl(tdb->fd, F_SETLK, &fl);
}

static int fcntl_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
{
        struct flock fl;
#if 0 /* Check they matched up locks and unlocks correctly. */
        char line[80];
        FILE *locks;
        bool found = false;

        locks = fopen("/proc/locks", "r");

        while (fgets(line, 80, locks)) {
                char *p;
                int type, start, l;

                /* eg. 1: POSIX  ADVISORY  WRITE 2440 08:01:2180826 0 EOF */
                p = strchr(line, ':') + 1;
                if (strncmp(p, " POSIX  ADVISORY  ", strlen(" POSIX  ADVISORY  ")))
                        continue;
                p += strlen(" POSIX  ADVISORY  ");
                if (strncmp(p, "READ  ", strlen("READ  ")) == 0)
                        type = F_RDLCK;
                else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
                        type = F_WRLCK;
                else
                        abort();
                p += 6;
                if (atoi(p) != getpid())
                        continue;
                p = strchr(strchr(p, ' ') + 1, ' ') + 1;
                start = atoi(p);
                p = strchr(p, ' ') + 1;
                if (strncmp(p, "EOF", 3) == 0)
                        l = 0;
                else
                        l = atoi(p) - start + 1;

                if (off == start) {
                        if (len != l) {
                                fprintf(stderr, "Len %u should be %u: %s",
                                        (int)len, l, line);
                                abort();
                        }
                        if (type != rw) {
                                fprintf(stderr, "Type %s wrong: %s",
                                        rw == F_RDLCK ? "READ" : "WRITE", line);
                                abort();
                        }
                        found = true;
                        break;
                }
        }

        if (!found) {
                fprintf(stderr, "Unlock on %u@%u not found!\n",
                        (int)off, (int)len);
                abort();
        }

        fclose(locks);
#endif

        fl.l_type = F_UNLCK;
        fl.l_whence = SEEK_SET;
        fl.l_start = off;
        fl.l_len = len;
        fl.l_pid = 0;

        return fcntl(tdb->fd, F_SETLKW, &fl);
}

/* a byte range locking function - return 0 on success
   this function locks/unlocks len bytes at the specified offset.

   note that a len of zero means lock to end of file
*/
static int tdb_brlock(struct tdb_context *tdb,
                      int rw_type, tdb_off_t offset, tdb_off_t len,
                      enum tdb_lock_flags flags)
{
        int ret;

        if (tdb->flags & TDB_NOLOCK) {
                return 0;
        }

        if (rw_type == F_WRLCK && tdb->read_only) {
                tdb->ecode = TDB_ERR_RDONLY;
                return -1;
        }

        /* A 32 bit system cannot open a 64-bit file, but the file could
         * have expanded since we opened it: check here. */
        if ((size_t)(offset + len) != offset + len) {
                tdb->ecode = TDB_ERR_IO;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_brlock: lock on giant offset %llu\n",
                         (long long)(offset + len));
                return -1;
        }

        do {
                ret = fcntl_lock(tdb, rw_type, offset, len,
                                 flags & TDB_LOCK_WAIT);
        } while (ret == -1 && errno == EINTR);

        if (ret == -1) {
                tdb->ecode = TDB_ERR_LOCK;
                /* Generic lock error. errno set by fcntl.
                 * EAGAIN is an expected return from non-blocking
                 * locks. */
                if (!(flags & TDB_LOCK_PROBE) && errno != EAGAIN) {
                        tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                                 "tdb_brlock failed (fd=%d) at"
                                 " offset %llu rw_type=%d flags=%d len=%llu\n",
                                 tdb->fd, (long long)offset, rw_type,
                                 flags, (long long)len);
                }
                return -1;
        }
        return 0;
}

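/* Release a byte range lock previously taken with tdb_brlock(). */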
static int tdb_brunlock(struct tdb_context *tdb,
                        int rw_type, tdb_off_t offset, size_t len)
{
        int ret;

        if (tdb->flags & TDB_NOLOCK) {
                return 0;
        }

        do {
                ret = fcntl_unlock(tdb, rw_type, offset, len);
        } while (ret == -1 && errno == EINTR);

        if (ret == -1) {
                tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
                         "tdb_brunlock failed (fd=%d) at offset %llu"
                         " rw_type=%d len=%llu\n",
                         tdb->fd, (long long)offset, rw_type, (long long)len);
        }
        return ret;
}

#if 0
/*
  upgrade a read lock to a write lock. This needs to be handled in a
  special way as some OSes (such as Solaris) have too-conservative
  deadlock detection and claim a deadlock when progress can be
  made. For those OSes we may loop for a while.
*/
int tdb_allrecord_upgrade(struct tdb_context *tdb)
{
        int count = 1000;

        if (tdb->allrecord_lock.count != 1) {
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_allrecord_upgrade failed: count %u too high\n",
                         tdb->allrecord_lock.count);
                return -1;
        }

        if (tdb->allrecord_lock.off != 1) {
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_allrecord_upgrade failed: already upgraded?\n");
                return -1;
        }

        while (count--) {
                struct timeval tv;
                if (tdb_brlock(tdb, F_WRLCK,
                               TDB_HASH_LOCK_START
                               + (1ULL << tdb->header.v.hash_bits), 0,
                               TDB_LOCK_WAIT|TDB_LOCK_PROBE) == 0) {
                        tdb->allrecord_lock.ltype = F_WRLCK;
                        tdb->allrecord_lock.off = 0;
                        return 0;
                }
                if (errno != EDEADLK) {
                        break;
                }
                /* sleep for as short a time as we can - more portable than usleep() */
                tv.tv_sec = 0;
                tv.tv_usec = 1;
                select(0, NULL, NULL, NULL, &tv);
        }
        tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
                 "tdb_allrecord_upgrade failed\n");
        return -1;
}
#endif

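/* Find the in-memory record tracking a nested lock at this offset, if any. */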
static struct tdb_lock_type *find_nestlock(struct tdb_context *tdb,
                                           tdb_off_t offset)
{
        unsigned int i;

        for (i=0; i<tdb->num_lockrecs; i++) {
                if (tdb->lockrecs[i].off == offset) {
                        return &tdb->lockrecs[i];
                }
        }
        return NULL;
}

/* lock an offset in the database. */
static int tdb_nest_lock(struct tdb_context *tdb, tdb_off_t offset, int ltype,
                         enum tdb_lock_flags flags)
{
        struct tdb_lock_type *new_lck;

        if (offset >= TDB_HASH_LOCK_START + (1ULL << tdb->header.v.hash_bits)
            + (tdb->header.v.num_zones * (tdb->header.v.free_buckets+1))) {
                tdb->ecode = TDB_ERR_LOCK;
                tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                         "tdb_lock: invalid offset %llu for ltype=%d\n",
                         (long long)offset, ltype);
                return -1;
        }
        if (tdb->flags & TDB_NOLOCK)
                return 0;

        new_lck = find_nestlock(tdb, offset);
        if (new_lck) {
                /*
                 * Just increment the in-memory struct, posix locks
                 * don't stack.
                 */
                new_lck->count++;
                return 0;
        }

        new_lck = (struct tdb_lock_type *)realloc(
                tdb->lockrecs,
                sizeof(*tdb->lockrecs) * (tdb->num_lockrecs+1));
        if (new_lck == NULL) {
                tdb->ecode = TDB_ERR_OOM;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_lock: unable to allocate %llu lock structures\n",
                         (long long)(tdb->num_lockrecs + 1));
                errno = ENOMEM;
                return -1;
        }
        tdb->lockrecs = new_lck;

        /* Since fcntl locks don't nest, we do a real lock for the first one,
           and simply bump the count for future ones. */
        if (tdb_brlock(tdb, ltype, offset, 1, flags)) {
                return -1;
        }

        tdb->lockrecs[tdb->num_lockrecs].off = offset;
        tdb->lockrecs[tdb->num_lockrecs].count = 1;
        tdb->lockrecs[tdb->num_lockrecs].ltype = ltype;
        tdb->num_lockrecs++;

        return 0;
}

static int tdb_lock_and_recover(struct tdb_context *tdb)
{
#if 0 /* FIXME */

        int ret;

        /* We need to match locking order in transaction commit. */
        if (tdb_brlock(tdb, F_WRLCK, FREELIST_TOP, 0, TDB_LOCK_WAIT)) {
                return -1;
        }

        if (tdb_brlock(tdb, F_WRLCK, OPEN_LOCK, 1, TDB_LOCK_WAIT)) {
                tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);
                return -1;
        }

        ret = tdb_transaction_recover(tdb);

        tdb_brunlock(tdb, F_WRLCK, OPEN_LOCK, 1);
        tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);

        return ret;
#else
        abort();
        return -1;
#endif
}

static bool tdb_needs_recovery(struct tdb_context *tdb)
{
        /* FIXME */
        return false;
}

static int tdb_nest_unlock(struct tdb_context *tdb, tdb_off_t off, int ltype)
{
        int ret = -1;
        struct tdb_lock_type *lck;

        if (tdb->flags & TDB_NOLOCK)
                return 0;

        lck = find_nestlock(tdb, off);
        if ((lck == NULL) || (lck->count == 0)) {
                tdb->ecode = TDB_ERR_LOCK;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_unlock: no lock for %llu\n", (long long)off);
                return -1;
        }

        if (lck->count > 1) {
                lck->count--;
                return 0;
        }

        /*
         * This lock has count==1 left, so we need to unlock it in the
         * kernel. We don't bother with decrementing the in-memory array
         * element, we're about to overwrite it with the last array element
         * anyway.
         */
        ret = tdb_brunlock(tdb, ltype, off, 1);

        /*
         * Shrink the array by overwriting the element just unlocked with the
         * last array element.
         */
        *lck = tdb->lockrecs[--tdb->num_lockrecs];

        if (tdb->num_lockrecs == 0) {
                /* If we're not holding any locks, header can change. */
                tdb->header_uptodate = false;
        }

        return ret;
}
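
#if 0
/* Illustrative sketch (not part of the original file): tdb_nest_lock() and
 * tdb_nest_unlock() keep a per-offset count because fcntl locks don't stack,
 * so calls must pair up.  Assumes "off" is a valid lock offset below the
 * hash + free-list range checked in tdb_nest_lock(); error handling elided. */
static void example_nested_locking(struct tdb_context *tdb, tdb_off_t off)
{
        tdb_nest_lock(tdb, off, F_RDLCK, TDB_LOCK_WAIT); /* takes the fcntl lock */
        tdb_nest_lock(tdb, off, F_RDLCK, TDB_LOCK_WAIT); /* only bumps the count */
        tdb_nest_unlock(tdb, off, F_RDLCK);              /* count 2 -> 1, no fcntl call */
        tdb_nest_unlock(tdb, off, F_RDLCK);              /* count 1 -> 0, drops the fcntl lock */
}
#endif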

#if 0
/*
  get the transaction lock
 */
int tdb_transaction_lock(struct tdb_context *tdb, int ltype,
                         enum tdb_lock_flags lockflags)
{
        return tdb_nest_lock(tdb, TRANSACTION_LOCK, ltype, lockflags);
}

/*
  release the transaction lock
 */
int tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
{
        return tdb_nest_unlock(tdb, TRANSACTION_LOCK, ltype);
}
#endif

/* We only need to lock individual bytes, but Linux merges consecutive locks
 * so we lock in contiguous ranges. */
static int tdb_lock_gradual(struct tdb_context *tdb,
                            int ltype, enum tdb_lock_flags flags,
                            tdb_off_t off, tdb_off_t len)
{
        int ret;
        enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);

        if (len <= 4) {
                /* Single record.  Just do blocking lock. */
                return tdb_brlock(tdb, ltype, off, len, flags);
        }

        /* First we try non-blocking. */
        ret = tdb_brlock(tdb, ltype, off, len, nb_flags);
        if (ret == 0) {
                return 0;
        }

        /* Try locking first half, then second. */
        ret = tdb_lock_gradual(tdb, ltype, flags, off, len / 2);
        if (ret == -1)
                return -1;

        ret = tdb_lock_gradual(tdb, ltype, flags,
                               off + len / 2, len - len / 2);
        if (ret == -1) {
                tdb_brunlock(tdb, ltype, off, len / 2);
                return -1;
        }
        return 0;
}
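
/*
 * Example (illustrative, not in the original): with len == 16, a contended
 * non-blocking attempt on [off, off+16) falls back to two recursive calls
 * covering [off, off+8) and [off+8, off+16); only ranges of 4 bytes or fewer
 * are taken with a plain blocking lock, so any long wait is only ever for a
 * small piece of the range at a time.
 */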

/* lock/unlock entire database.  It can only be upgradable if you have some
 * other way of guaranteeing exclusivity (ie. transaction write lock).
 * Note that we don't lock the free chains: no one can get those locks
 * without a hash chain lock first. */
int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
                       enum tdb_lock_flags flags, bool upgradable)
{
        tdb_off_t hash_size;

        /* FIXME: There are no locks on read-only dbs */
        if (tdb->read_only) {
                tdb->ecode = TDB_ERR_LOCK;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_allrecord_lock: read-only\n");
                return -1;
        }

        if (tdb->allrecord_lock.count && tdb->allrecord_lock.ltype == ltype) {
                tdb->allrecord_lock.count++;
                return 0;
        }

        if (tdb->allrecord_lock.count) {
                /* a global lock of a different type exists */
                tdb->ecode = TDB_ERR_LOCK;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_allrecord_lock: already have %s lock\n",
                         tdb->allrecord_lock.ltype == F_RDLCK
                         ? "read" : "write");
                return -1;
        }

        if (tdb_has_locks(tdb)) {
                /* can't combine global and chain locks */
                tdb->ecode = TDB_ERR_LOCK;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_allrecord_lock: already have chain lock\n");
                return -1;
        }

        if (upgradable && ltype != F_RDLCK) {
                /* tdb error: you can't upgrade a write lock! */
                tdb->ecode = TDB_ERR_LOCK;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_allrecord_lock: can't upgrade a write lock\n");
                return -1;
        }

        /* Lock all the hash buckets. */
again:
        hash_size = (1ULL << tdb->header.v.hash_bits);
        if (tdb_lock_gradual(tdb, ltype, flags, TDB_HASH_LOCK_START,
                             hash_size)) {
                if (!(flags & TDB_LOCK_PROBE)) {
                        tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                                 "tdb_lockall hashes failed (%s)\n",
                                 strerror(errno));
                }
                return -1;
        }

        /* Now we re-check header, holding lock. */
        if (unlikely(update_header(tdb))) {
                tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START, hash_size);
                goto again;
        }

        /* Now check for needing recovery. */
        if (unlikely(tdb_needs_recovery(tdb))) {
                tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START, hash_size);
                if (tdb_lock_and_recover(tdb) == -1) {
                        return -1;
                }
                goto again;
        }

        tdb->allrecord_lock.count = 1;
        /* If it's upgradable, it's actually exclusive so we can treat
         * it as a write lock. */
        tdb->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
        tdb->allrecord_lock.off = upgradable;
        return 0;
}
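
#if 0
/* Illustrative usage sketch (not part of the original file): take and drop
 * an exclusive lock over all the hash chains.  Assumes "tdb" is an open,
 * writable context. */
static int example_with_allrecord_lock(struct tdb_context *tdb)
{
        if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
                return -1;
        /* ... operate on the database with every chain locked ... */
        return tdb_allrecord_unlock(tdb, F_WRLCK);
}
#endif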

int tdb_lock_open(struct tdb_context *tdb)
{
        return tdb_nest_lock(tdb, TDB_OPEN_LOCK, F_WRLCK, TDB_LOCK_WAIT);
}

void tdb_unlock_open(struct tdb_context *tdb)
{
        tdb_nest_unlock(tdb, TDB_OPEN_LOCK, F_WRLCK);
}

/* unlock entire db */
int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype)
{
        tdb_off_t hash_size;

        /* FIXME: There are no locks on read-only dbs */
        if (tdb->read_only) {
                tdb->ecode = TDB_ERR_LOCK;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_allrecord_unlock: read-only\n");
                return -1;
        }

        if (tdb->allrecord_lock.count == 0) {
                tdb->ecode = TDB_ERR_LOCK;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_allrecord_unlock: not locked!\n");
                return -1;
        }

        /* Upgradable locks are marked as write locks. */
        if (tdb->allrecord_lock.ltype != ltype
            && (!tdb->allrecord_lock.off || ltype != F_RDLCK)) {
                tdb->ecode = TDB_ERR_LOCK;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_allrecord_unlock: have %s lock\n",
                         tdb->allrecord_lock.ltype == F_RDLCK
                         ? "read" : "write");
                return -1;
        }

        if (tdb->allrecord_lock.count > 1) {
                tdb->allrecord_lock.count--;
                return 0;
        }

        tdb->allrecord_lock.count = 0;
        tdb->allrecord_lock.ltype = 0;

        hash_size = (1ULL << tdb->header.v.hash_bits);

        return tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START, hash_size);
}

bool tdb_has_locks(struct tdb_context *tdb)
{
        return tdb->allrecord_lock.count || tdb->num_lockrecs;
}

#if 0
/* lock entire database with write lock */
int tdb_lockall(struct tdb_context *tdb)
{
        tdb_trace(tdb, "tdb_lockall");
        return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
}

/* lock entire database with write lock - nonblocking variant */
int tdb_lockall_nonblock(struct tdb_context *tdb)
{
        int ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_NOWAIT, false);
        tdb_trace_ret(tdb, "tdb_lockall_nonblock", ret);
        return ret;
}

/* unlock entire database with write lock */
int tdb_unlockall(struct tdb_context *tdb)
{
        tdb_trace(tdb, "tdb_unlockall");
        return tdb_allrecord_unlock(tdb, F_WRLCK);
}

/* lock entire database with read lock */
int tdb_lockall_read(struct tdb_context *tdb)
{
        tdb_trace(tdb, "tdb_lockall_read");
        return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
}

/* lock entire database with read lock - nonblocking variant */
int tdb_lockall_read_nonblock(struct tdb_context *tdb)
{
        int ret = tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_NOWAIT, false);
        tdb_trace_ret(tdb, "tdb_lockall_read_nonblock", ret);
        return ret;
}

/* unlock entire database with read lock */
int tdb_unlockall_read(struct tdb_context *tdb)
{
        tdb_trace(tdb, "tdb_unlockall_read");
        return tdb_allrecord_unlock(tdb, F_RDLCK);
}
#endif

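/* Hash chain locks occupy one byte each, starting at TDB_HASH_LOCK_START;
 * free list locks follow immediately after the 1 << hash_bits chain locks
 * (see tdb_lock_free_list() below). */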
int tdb_lock_list(struct tdb_context *tdb, tdb_off_t list,
                  int ltype, enum tdb_lock_flags waitflag)
{
        /* an allrecord lock allows us to avoid per chain locks */
        if (tdb->allrecord_lock.count &&
            (ltype == tdb->allrecord_lock.ltype || ltype == F_RDLCK)) {
                return 0;
        }

        if (tdb->allrecord_lock.count) {
                tdb->ecode = TDB_ERR_LOCK;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "tdb_lock_list: have %s allrecord lock\n",
                         tdb->allrecord_lock.ltype == F_RDLCK
                         ? "read" : "write");
                return -1;
        }

        /* FIXME: Should we do header_uptodate and return retry here? */
        return tdb_nest_lock(tdb, TDB_HASH_LOCK_START + list, ltype, waitflag);
}

int tdb_unlock_list(struct tdb_context *tdb, tdb_off_t list, int ltype)
{
        /* an allrecord lock allows us to avoid per chain locks */
        if (tdb->allrecord_lock.count) {
                if (tdb->allrecord_lock.ltype == F_RDLCK
                    && ltype == F_WRLCK) {
                        tdb->ecode = TDB_ERR_LOCK;
                        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                                 "tdb_unlock_list RO allrecord!\n");
                        return -1;
                }
                return 0;
        } else {
                return tdb_nest_unlock(tdb, TDB_HASH_LOCK_START + list, ltype);
        }
}

/* Free list locks come after hash locks */
int tdb_lock_free_list(struct tdb_context *tdb, tdb_off_t flist,
                       enum tdb_lock_flags waitflag)
{
        /* You're supposed to have a hash lock first! */
        if (!tdb_has_locks(tdb)) {
                tdb->ecode = TDB_ERR_LOCK;
                tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                         "tdb_lock_free_list without lock!\n");
                return -1;
        }

        /* an allrecord lock allows us to avoid per chain locks */
        if (tdb->allrecord_lock.count) {
                if (tdb->allrecord_lock.ltype == F_WRLCK)
                        return 0;
                tdb->ecode = TDB_ERR_LOCK;
                tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                         "tdb_lock_free_list with RO allrecord lock!\n");
                return -1;
        }

        return tdb_nest_lock(tdb, TDB_HASH_LOCK_START
                             + (1ULL << tdb->header.v.hash_bits)
                             + flist, F_WRLCK, waitflag);
}

void tdb_unlock_free_list(struct tdb_context *tdb, tdb_off_t flist)
{
        if (tdb->allrecord_lock.count)
                return;

        tdb_nest_unlock(tdb, TDB_HASH_LOCK_START
                        + (1ULL << tdb->header.v.hash_bits)
                        + flist, F_WRLCK);
}
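
#if 0
/* Illustrative sketch (not part of the original file): the lock ordering
 * required when touching a free list: the hash chain lock must already be
 * held, since tdb_lock_free_list() refuses to run without one.  "list" and
 * "flist" are assumed to be valid indices; error handling is elided. */
static void example_free_list_lock_order(struct tdb_context *tdb,
                                         tdb_off_t list, tdb_off_t flist)
{
        if (tdb_lock_list(tdb, list, F_WRLCK, TDB_LOCK_WAIT) == -1)
                return;
        if (tdb_lock_free_list(tdb, flist, TDB_LOCK_WAIT) == 0) {
                /* ... allocate from or return space to the free list ... */
                tdb_unlock_free_list(tdb, flist);
        }
        tdb_unlock_list(tdb, list, F_WRLCK);
}
#endif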

#if 0
static int chainlock_loop(struct tdb_context *tdb, const TDB_DATA *key,
                          int ltype, enum tdb_lock_flags waitflag,
                          const char *func)
{
        int ret;
        uint64_t h = tdb_hash(tdb, key->dptr, key->dsize);

again:
        ret = tdb_lock_list(tdb,
                            h & ((1ULL << tdb->header.v.hash_bits) - 1),
                            ltype, waitflag);
        if (likely(ret == 0) && unlikely(update_header(tdb))) {
                tdb_unlock_list(tdb, h & ((1ULL << tdb->header.v.hash_bits)-1),
                                ltype);
                goto again;
        }

        tdb_trace_1rec(tdb, func, *key);
        return ret;
}

/* lock/unlock one hash chain. This is meant to be used to reduce
   contention - it cannot guarantee how many records will be locked */
int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
{
        return chainlock_loop(tdb, &key, F_WRLCK, TDB_LOCK_WAIT,
                              "tdb_chainlock");
}

/* lock/unlock one hash chain, non-blocking. This is meant to be used
   to reduce contention - it cannot guarantee how many records will be
   locked */
int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key)
{
        return chainlock_loop(tdb, &key, F_WRLCK, TDB_LOCK_NOWAIT,
                              "tdb_chainlock_nonblock");
}

int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
{
        uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
        tdb_trace_1rec(tdb, "tdb_chainunlock", key);
        return tdb_unlock_list(tdb, h & ((1ULL << tdb->header.v.hash_bits)-1),
                               F_WRLCK);
}

int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key)
{
        return chainlock_loop(tdb, &key, F_RDLCK, TDB_LOCK_WAIT,
                              "tdb_chainlock_read");
}

int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key)
{
        uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
        tdb_trace_1rec(tdb, "tdb_chainunlock_read", key);
        return tdb_unlock_list(tdb, h & ((1ULL << tdb->header.v.hash_bits)-1),
                               F_RDLCK);
}

/* record lock stops delete underneath */
int tdb_lock_record(struct tdb_context *tdb, tdb_off_t off)
{
        if (tdb->allrecord_lock.count) {
                return 0;
        }
        return off ? tdb_brlock(tdb, F_RDLCK, off, 1, TDB_LOCK_WAIT) : 0;
}

/*
  Write locks override our own fcntl readlocks, so check it here.
  Note this is meant to be F_SETLK, *not* F_SETLKW, as it's not
  an error to fail to get the lock here.
*/
int tdb_write_lock_record(struct tdb_context *tdb, tdb_off_t off)
{
        struct tdb_traverse_lock *i;
        for (i = &tdb->travlocks; i; i = i->next)
                if (i->off == off)
                        return -1;
        if (tdb->allrecord_lock.count) {
                if (tdb->allrecord_lock.ltype == F_WRLCK) {
                        return 0;
                }
                return -1;
        }
        return tdb_brlock(tdb, F_WRLCK, off, 1, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
}

int tdb_write_unlock_record(struct tdb_context *tdb, tdb_off_t off)
{
        if (tdb->allrecord_lock.count) {
                return 0;
        }
        return tdb_brunlock(tdb, F_WRLCK, off, 1);
}

/* fcntl locks don't stack: avoid unlocking someone else's */
int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off)
{
        struct tdb_traverse_lock *i;
        uint32_t count = 0;

        if (tdb->allrecord_lock.count) {
                return 0;
        }

        if (off == 0)
                return 0;
        for (i = &tdb->travlocks; i; i = i->next)
                if (i->off == off)
                        count++;
        return (count == 1 ? tdb_brunlock(tdb, F_RDLCK, off, 1) : 0);
}

/* The transaction code uses this to remove all locks. */
void tdb_release_transaction_locks(struct tdb_context *tdb)
{
        unsigned int i;

        if (tdb->allrecord_lock.count != 0) {
                tdb_off_t hash_size, free_size;

                hash_size = (1ULL << tdb->header.v.hash_bits)
                        * sizeof(tdb_off_t);
                free_size = tdb->header.v.free_zones
                        * (tdb->header.v.free_buckets + 1) * sizeof(tdb_off_t);

                tdb_brunlock(tdb, tdb->allrecord_lock.ltype,
                             tdb->header.v.hash_off, hash_size);
                tdb_brunlock(tdb, tdb->allrecord_lock.ltype,
                             tdb->header.v.free_off, free_size);
                tdb->allrecord_lock.count = 0;
                tdb->allrecord_lock.ltype = 0;
        }

        for (i = 0; i<tdb->num_lockrecs; i++) {
                struct tdb_lock_type *lck = &tdb->lockrecs[i];

                tdb_brunlock(tdb, lck->ltype, lck->off, 1);
        }
        tdb->num_lockrecs = 0;
        SAFE_FREE(tdb->lockrecs);
        tdb->header_uptodate = false;
}
#endif