tdb2: extra debugging checks
ccan/tdb2/free.c
/*
   Trivial Database 2: free list/block handling
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <ccan/likely/likely.h>
#include <ccan/ilog/ilog.h>
#include <time.h>
#include <assert.h>
#include <limits.h>

static unsigned fls64(uint64_t val)
{
        return ilog64(val);
}

/* In which bucket would we find a particular record size? (ignoring header) */
unsigned int size_to_bucket(unsigned int zone_bits, tdb_len_t data_len)
{
        unsigned int bucket;

        /* We can't have records smaller than this. */
        assert(data_len >= TDB_MIN_DATA_LEN);

        /* Ignoring the header... */
        if (data_len - TDB_MIN_DATA_LEN <= 64) {
                /* 0 in bucket 0, 8 in bucket 1... 64 in bucket 8. */
                bucket = (data_len - TDB_MIN_DATA_LEN) / 8;
        } else {
                /* After that we go power of 2. */
                bucket = fls64(data_len - TDB_MIN_DATA_LEN) + 2;
        }

        if (unlikely(bucket > BUCKETS_FOR_ZONE(zone_bits)))
                bucket = BUCKETS_FOR_ZONE(zone_bits);
        return bucket;
}
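
/*
 * Worked example (editor's sketch, not part of the original file):
 * writing d = data_len - TDB_MIN_DATA_LEN, the mapping above gives
 *   d = 0   -> bucket 0,  d = 8 -> bucket 1, ... d = 64 -> bucket 8,
 *   d = 65  -> fls64(65) + 2  = 7 + 2 = 9,
 *   d = 128 -> fls64(128) + 2 = 8 + 2 = 10.
 * The checks below assume zone_bits is large enough that no clamping
 * to BUCKETS_FOR_ZONE() occurs; TDB_FREE_EXAMPLES is a hypothetical
 * guard, so this is never built by default.
 */
#ifdef TDB_FREE_EXAMPLES
static void example_size_to_bucket(unsigned int zone_bits)
{
        /* Linear region: one bucket per 8 bytes above the minimum. */
        assert(size_to_bucket(zone_bits, TDB_MIN_DATA_LEN) == 0);
        assert(size_to_bucket(zone_bits, TDB_MIN_DATA_LEN + 8) == 1);
        assert(size_to_bucket(zone_bits, TDB_MIN_DATA_LEN + 64) == 8);
        /* Power-of-2 region: fls64(d) + 2. */
        assert(size_to_bucket(zone_bits, TDB_MIN_DATA_LEN + 65) == 9);
        assert(size_to_bucket(zone_bits, TDB_MIN_DATA_LEN + 128) == 10);
}
#endif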

/* Subtract 1-byte tailer and header.  Then round up to next power of 2. */
static unsigned max_zone_bits(struct tdb_context *tdb)
{
        return fls64(tdb->map_size - 1 - sizeof(struct tdb_header) - 1) + 1;
}

/* Start by using a random zone to spread the load: returns the offset. */
static uint64_t random_zone(struct tdb_context *tdb)
{
        struct free_zone_header zhdr;
        tdb_off_t off = sizeof(struct tdb_header);
        tdb_len_t half_bits;
        uint64_t randbits = 0;
        unsigned int i;

        for (i = 0; i < 64; i += fls64(RAND_MAX))
                randbits ^= ((uint64_t)random()) << i;

        /* FIXME: Does this work?  Test! */
        half_bits = max_zone_bits(tdb) - 1;
        do {
                /* Pick left or right side (not outside file) */
                if ((randbits & 1)
                    && !tdb->methods->oob(tdb, off + (1ULL << half_bits)
                                          + sizeof(zhdr), true)) {
                        off += 1ULL << half_bits;
                }
                randbits >>= 1;

                if (tdb_read_convert(tdb, off, &zhdr, sizeof(zhdr)) == -1)
                        return TDB_OFF_ERR;

                if (zhdr.zone_bits == half_bits)
                        return off;

                half_bits--;
        } while (half_bits >= INITIAL_ZONE_BITS);

        tdb->ecode = TDB_ERR_CORRUPT;
        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                 "random_zone: zone at %llu smaller than %u bits?",
                 (long long)off, INITIAL_ZONE_BITS);
        return TDB_OFF_ERR;
}
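
/*
 * Editor's note (illustrative): random() returns at most RAND_MAX,
 * typically 2^31 - 1, so fls64(RAND_MAX) is usually 31 and the loop
 * above XORs 31-bit chunks in at shifts i = 0, 31 and 62, filling all
 * 64 bits of randbits.
 */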

int tdb_zone_init(struct tdb_context *tdb)
{
        tdb->zone_off = random_zone(tdb);
        if (tdb->zone_off == TDB_OFF_ERR)
                return -1;
        if (tdb_read_convert(tdb, tdb->zone_off,
                             &tdb->zhdr, sizeof(tdb->zhdr)) == -1)
                return -1;
        return 0;
}

/* Where's the header, given a zone size of 1 << zone_bits? */
static tdb_off_t zone_off(tdb_off_t off, unsigned int zone_bits)
{
        off -= sizeof(struct tdb_header);
        return (off & ~((1ULL << zone_bits) - 1)) + sizeof(struct tdb_header);
}
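
/*
 * Worked example (editor's sketch): with zone_bits = 16 every zone is
 * 64KiB, so any offset in hdr + [0x10000, 0x1ffff] masks back to the
 * zone starting at hdr + 0x10000 (hdr = sizeof(struct tdb_header)).
 * TDB_FREE_EXAMPLES is a hypothetical guard; never built by default.
 */
#ifdef TDB_FREE_EXAMPLES
static void example_zone_off(void)
{
        tdb_off_t hdr = sizeof(struct tdb_header);

        assert(zone_off(hdr + 0x10000, 16) == hdr + 0x10000);
        assert(zone_off(hdr + 0x1abcd, 16) == hdr + 0x10000);
        assert(zone_off(hdr + 0x1ffff, 16) == hdr + 0x10000);
}
#endif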

/* Offset of a given bucket. */
/* FIXME: bucket can be "unsigned" everywhere, or even uint8/16. */
tdb_off_t bucket_off(tdb_off_t zone_off, tdb_off_t bucket)
{
        return zone_off
                + sizeof(struct free_zone_header)
                + bucket * sizeof(tdb_off_t);
}

/* Returns free_buckets + 1, or list number to search. */
static tdb_off_t find_free_head(struct tdb_context *tdb, tdb_off_t bucket)
{
        /* Speculatively search for a non-zero bucket. */
        return tdb_find_nonzero_off(tdb, bucket_off(tdb->zone_off, 0),
                                    bucket,
                                    BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits) + 1);
}

/* Remove from free bucket. */
static int remove_from_list(struct tdb_context *tdb,
                            tdb_off_t b_off, tdb_off_t r_off,
                            struct tdb_free_record *r)
{
        tdb_off_t off;

        /* Front of list? */
        if (r->prev == 0) {
                off = b_off;
        } else {
                off = r->prev + offsetof(struct tdb_free_record, next);
        }

#ifdef DEBUG
        if (tdb_read_off(tdb, off) != r_off) {
                tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                         "remove_from_list: %llu bad prev in list %llu\n",
                         (long long)r_off, (long long)b_off);
                return -1;
        }
#endif

        /* r->prev->next = r->next */
        if (tdb_write_off(tdb, off, r->next)) {
                return -1;
        }

        if (r->next != 0) {
                off = r->next + offsetof(struct tdb_free_record, prev);
                /* r->next->prev = r->prev */

#ifdef DEBUG
                if (tdb_read_off(tdb, off) != r_off) {
                        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                                 "remove_from_list: %llu bad list %llu\n",
                                 (long long)r_off, (long long)b_off);
                        return -1;
                }
#endif

                if (tdb_write_off(tdb, off, r->prev)) {
                        return -1;
                }
        }
        return 0;
}
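
/*
 * Editor's sketch in the spirit of the DEBUG checks above: walk a whole
 * bucket list and verify that every record is free and that each prev
 * link mirrors the next link we followed.  Assumes the caller already
 * holds the bucket lock; check_list and TDB_FREE_EXAMPLES are
 * hypothetical, not tdb2 API.
 */
#ifdef TDB_FREE_EXAMPLES
static int check_list(struct tdb_context *tdb, tdb_off_t b_off)
{
        struct tdb_free_record pad, *r;
        tdb_off_t off, prev = 0;

        for (off = tdb_read_off(tdb, b_off); off; off = r->next) {
                if (off == TDB_OFF_ERR)
                        return -1;
                r = tdb_get(tdb, off, &pad, sizeof(pad));
                if (!r)
                        return -1;
                /* Each record must be free and point back correctly. */
                if (frec_magic(r) != TDB_FREE_MAGIC || r->prev != prev)
                        return -1;
                prev = off;
        }
        return 0;
}
#endif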

/* Enqueue in this free bucket. */
static int enqueue_in_free(struct tdb_context *tdb,
                           tdb_off_t b_off,
                           tdb_off_t off,
                           struct tdb_free_record *new)
{
        new->prev = 0;
        /* new->next = head. */
        new->next = tdb_read_off(tdb, b_off);
        if (new->next == TDB_OFF_ERR)
                return -1;

        if (new->next) {
#ifdef DEBUG
                if (tdb_read_off(tdb,
                                 new->next
                                 + offsetof(struct tdb_free_record, prev))
                    != 0) {
                        tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                                 "enqueue_in_free: %llu bad head prev %llu\n",
                                 (long long)new->next, (long long)b_off);
                        return -1;
                }
#endif
                /* next->prev = new. */
                if (tdb_write_off(tdb, new->next
                                  + offsetof(struct tdb_free_record, prev),
                                  off) != 0)
                        return -1;
        }
        /* head = new */
        if (tdb_write_off(tdb, b_off, off) != 0)
                return -1;

        return tdb_write_convert(tdb, off, new, sizeof(*new));
}

/* List need not be locked. */
int add_free_record(struct tdb_context *tdb,
                    unsigned int zone_bits,
                    tdb_off_t off, tdb_len_t len_with_header)
{
        struct tdb_free_record new;
        tdb_off_t b_off;
        int ret;

        assert(len_with_header >= sizeof(new));
        assert(zone_bits < (1 << 6));

        new.magic_and_meta = TDB_FREE_MAGIC | zone_bits;
        new.data_len = len_with_header - sizeof(struct tdb_used_record);

        b_off = bucket_off(zone_off(off, zone_bits),
                           size_to_bucket(zone_bits, new.data_len));
        if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) != 0)
                return -1;

        ret = enqueue_in_free(tdb, b_off, off, &new);
        tdb_unlock_free_bucket(tdb, b_off);
        return ret;
}

/* If we have enough left over to be useful, split that off. */
static int to_used_record(struct tdb_context *tdb,
                          unsigned int zone_bits,
                          tdb_off_t off,
                          tdb_len_t needed,
                          tdb_len_t total_len,
                          tdb_len_t *actual)
{
        struct tdb_used_record used;
        tdb_len_t leftover;

        leftover = total_len - needed;
        if (leftover < sizeof(struct tdb_free_record))
                leftover = 0;

        *actual = total_len - leftover;

        if (leftover) {
                if (add_free_record(tdb, zone_bits,
                                    off + sizeof(used) + *actual,
                                    leftover))
                        return -1;
        }
        return 0;
}
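
/*
 * Worked example (editor's sketch): say needed = 24 and total_len = 80.
 * Then leftover = 56; if sizeof(struct tdb_free_record) <= 56 the
 * caller gets *actual = 24 and the trailing 56 bytes go back on a free
 * list.  If the leftover is too small to hold a free record, the caller
 * simply keeps all 80 bytes as padding (*actual = 80).
 */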

/* Note: we unlock the current bucket if we coalesce or fail. */
static int coalesce(struct tdb_context *tdb,
                    tdb_off_t zone_off, unsigned zone_bits,
                    tdb_off_t off, tdb_off_t b_off, tdb_len_t data_len)
{
        struct tdb_free_record pad, *r;
        tdb_off_t end = off + sizeof(struct tdb_used_record) + data_len;

        while (end < (zone_off + (1ULL << zone_bits))) {
                tdb_off_t nb_off;

                /* FIXME: do tdb_get here and below really win? */
                r = tdb_get(tdb, end, &pad, sizeof(pad));
                if (!r)
                        goto err;

                if (frec_magic(r) != TDB_FREE_MAGIC)
                        break;

                nb_off = bucket_off(zone_off,
                                    size_to_bucket(zone_bits, r->data_len));

                /* We may be violating lock order here, so best effort. */
                if (tdb_lock_free_bucket(tdb, nb_off, TDB_LOCK_NOWAIT) == -1)
                        break;

                /* Now we have lock, re-check. */
                r = tdb_get(tdb, end, &pad, sizeof(pad));
                if (!r) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        goto err;
                }

                if (unlikely(frec_magic(r) != TDB_FREE_MAGIC)) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        break;
                }

                if (remove_from_list(tdb, nb_off, end, r) == -1) {
                        tdb_unlock_free_bucket(tdb, nb_off);
                        goto err;
                }

                end += sizeof(struct tdb_used_record) + r->data_len;
                tdb_unlock_free_bucket(tdb, nb_off);
        }

        /* Didn't find any adjacent free? */
        if (end == off + sizeof(struct tdb_used_record) + data_len)
                return 0;

        /* OK, expand record */
        r = tdb_get(tdb, off, &pad, sizeof(pad));
        if (!r)
                goto err;

        if (r->data_len != data_len) {
                tdb->ecode = TDB_ERR_CORRUPT;
                tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                         "coalesce: expected data len %llu not %llu\n",
                         (long long)data_len, (long long)r->data_len);
                goto err;
        }

        if (remove_from_list(tdb, b_off, off, r) == -1)
                goto err;

        /* We have to drop this to avoid deadlocks. */
        tdb_unlock_free_bucket(tdb, b_off);

        if (add_free_record(tdb, zone_bits, off, end - off) == -1)
                return -1;
        return 1;

err:
        /* To unify error paths, we *always* unlock bucket on error. */
        tdb_unlock_free_bucket(tdb, b_off);
        return -1;
}
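
/*
 * Editor's note on the re-check above (an illustration, not new logic):
 * between the unlocked peek at the record following `end` and taking
 * its bucket lock, another process may allocate or further coalesce
 * that record, so its magic can change under us.  That is why the
 * record is re-read and TDB_FREE_MAGIC re-verified once the lock is
 * actually held.
 */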

/* We need size bytes to put our key and data in. */
static tdb_off_t lock_and_alloc(struct tdb_context *tdb,
                                tdb_off_t zone_off,
                                unsigned zone_bits,
                                tdb_off_t bucket,
                                size_t size,
                                tdb_len_t *actual)
{
        tdb_off_t off, b_off, best_off;
        struct tdb_free_record pad, best = { 0 }, *r;
        double multiplier;

again:
        b_off = bucket_off(zone_off, bucket);

        /* FIXME: Try non-blocking wait first, to measure contention.
         * If we're contended, try switching zones, and don't enlarge zone
         * next time (we want more zones). */
        /* Lock this bucket. */
        if (tdb_lock_free_bucket(tdb, b_off, TDB_LOCK_WAIT) == -1) {
                return TDB_OFF_ERR;
        }

        best.data_len = -1ULL;
        best_off = 0;
        /* FIXME: Start with larger multiplier if we're growing. */
        multiplier = 1.0;

        /* Walk the list to see if any are large enough, getting less fussy
         * as we go. */
        off = tdb_read_off(tdb, b_off);
        if (unlikely(off == TDB_OFF_ERR))
                goto unlock_err;

        while (off) {
                /* FIXME: Does tdb_get win anything here? */
                r = tdb_get(tdb, off, &pad, sizeof(*r));
                if (!r)
                        goto unlock_err;

                if (frec_magic(r) != TDB_FREE_MAGIC) {
                        tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                                 "lock_and_alloc: %llu non-free 0x%llx\n",
                                 (long long)off, (long long)r->magic_and_meta);
                        goto unlock_err;
                }

                if (r->data_len >= size && r->data_len < best.data_len) {
                        best_off = off;
                        best = *r;
                }

                if (best.data_len < size * multiplier && best_off)
                        goto use_best;

                multiplier *= 1.01;

                /* Since we're going slow anyway, try coalescing here. */
                switch (coalesce(tdb, zone_off, zone_bits, off, b_off,
                                 r->data_len)) {
                case -1:
                        /* This has already unlocked on error. */
                        return TDB_OFF_ERR;
                case 1:
                        /* This has unlocked list, restart. */
                        goto again;
                }
                off = r->next;
        }

        /* If we found anything at all, use it. */
        if (best_off) {
        use_best:
                /* We're happy with this size: take it. */
                if (remove_from_list(tdb, b_off, best_off, &best) != 0)
                        goto unlock_err;
                tdb_unlock_free_bucket(tdb, b_off);

                if (to_used_record(tdb, zone_bits, best_off, size,
                                   best.data_len, actual)) {
                        return TDB_OFF_ERR;
                }
                return best_off;
        }

        tdb_unlock_free_bucket(tdb, b_off);
        return 0;

unlock_err:
        tdb_unlock_free_bucket(tdb, b_off);
        return TDB_OFF_ERR;
}
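
/*
 * Editor's note (a worked example of the search above): the multiplier
 * loosens the fit requirement by 1% per record walked, so after n
 * records any candidate within size * 1.01^n is accepted, e.g. roughly
 * 1.22 * size after 20 records and 1.64 * size after 50.  Long walks
 * thus trade a little padding for bounded search time.
 */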

static bool next_zone(struct tdb_context *tdb)
{
        tdb_off_t next = tdb->zone_off + (1ULL << tdb->zhdr.zone_bits);

        /* We must have a header. */
        if (tdb->methods->oob(tdb, next + sizeof(tdb->zhdr), true))
                return false;

        tdb->zone_off = next;
        return tdb_read_convert(tdb, next, &tdb->zhdr, sizeof(tdb->zhdr)) == 0;
}

/* Offset returned is within current zone (which it may alter). */
static tdb_off_t get_free(struct tdb_context *tdb, size_t size,
                          tdb_len_t *actual)
{
        tdb_off_t start_zone = tdb->zone_off, off;
        bool wrapped = false;

        /* FIXME: If we don't get a hit in the first bucket we want,
         * try changing zones for next time.  That should help wear
         * zones evenly, so we don't need to search all of them before
         * expanding. */
        while (!wrapped || tdb->zone_off != start_zone) {
                tdb_off_t b;

                /* Shortcut for really huge allocations: nothing in this
                 * zone can fit, so skip straight to the next zone. */
                if ((size >> tdb->zhdr.zone_bits) != 0)
                        goto next;

                /* Start at exact size bucket, and search up... */
                b = size_to_bucket(tdb->zhdr.zone_bits, size);
                for (b = find_free_head(tdb, b);
                     b <= BUCKETS_FOR_ZONE(tdb->zhdr.zone_bits);
                     b = find_free_head(tdb, b + 1)) {
                        /* Try getting one from list. */
                        off = lock_and_alloc(tdb, tdb->zone_off,
                                             tdb->zhdr.zone_bits,
                                             b, size, actual);
                        if (off == TDB_OFF_ERR)
                                return TDB_OFF_ERR;
                        if (off != 0)
                                return off;
                        /* Didn't work.  Try next bucket. */
                }

        next:
                /* Didn't work, try next zone, if it exists. */
                if (!next_zone(tdb)) {
                        wrapped = true;
                        tdb->zone_off = sizeof(struct tdb_header);
                        if (tdb_read_convert(tdb, tdb->zone_off,
                                             &tdb->zhdr, sizeof(tdb->zhdr))) {
                                return TDB_OFF_ERR;
                        }
                }
        }
        return 0;
}

int set_header(struct tdb_context *tdb,
               struct tdb_used_record *rec,
               uint64_t keylen, uint64_t datalen,
               uint64_t actuallen, uint64_t hash,
               unsigned int zone_bits)
{
        uint64_t keybits = (fls64(keylen) + 1) / 2;

        /* Use bottom bits of hash, so it's independent of hash table size. */
        rec->magic_and_meta
                = zone_bits
                | ((hash & ((1 << 5) - 1)) << 6)
                | ((actuallen - (keylen + datalen)) << 11)
                | (keybits << 43)
                | (TDB_MAGIC << 48);
        rec->key_and_data_len = (keylen | (datalen << (keybits * 2)));

        /* Encoding can fail on big values. */
        if (rec_key_length(rec) != keylen
            || rec_data_length(rec) != datalen
            || rec_extra_padding(rec) != actuallen - (keylen + datalen)) {
                tdb->ecode = TDB_ERR_IO;
                tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
                         "Could not encode k=%llu,d=%llu,a=%llu\n",
                         (long long)keylen, (long long)datalen,
                         (long long)actuallen);
                return -1;
        }
        return 0;
}
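
/*
 * Editor's reading of the packing above (illustrative, not normative):
 *   bits  0-5   zone_bits
 *   bits  6-10  bottom 5 bits of the hash
 *   bits 11-42  extra padding (actuallen - keylen - datalen)
 *   bits 43-47  keybits: the key length occupies the low 2*keybits bits
 *               of key_and_data_len, the data length the bits above it
 *   bits 48-63  TDB_MAGIC
 * Example: keylen = 10, datalen = 1000 gives fls64(10) = 4, so
 * keybits = 2 and key_and_data_len = 10 | (1000 << 4); decoding takes
 * the low 4 bits back out as the key length.
 */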

static bool zones_happy(struct tdb_context *tdb)
{
        /* FIXME: look at distribution of zones. */
        return true;
}

/* Assume we want buckets up to the comfort factor. */
static tdb_len_t overhead(unsigned int zone_bits)
{
        return sizeof(struct free_zone_header)
                + (BUCKETS_FOR_ZONE(zone_bits) + 1) * sizeof(tdb_off_t);
}

/* Expand the database (by adding a zone). */
static int tdb_expand(struct tdb_context *tdb, tdb_len_t size)
{
        uint64_t old_size;
        tdb_off_t off;
        uint8_t zone_bits;
        unsigned int num_buckets;
        tdb_len_t wanted;
        struct free_zone_header zhdr;
        bool enlarge_zone;

        /* We need room for the record header too. */
        wanted = sizeof(struct tdb_used_record) + size;

        /* Only one person can expand file at a time. */
        if (tdb_lock_expand(tdb, F_WRLCK) != 0)
                return -1;

        /* Someone else may have expanded the file, so retry. */
        old_size = tdb->map_size;
        tdb->methods->oob(tdb, tdb->map_size + 1, true);
        if (tdb->map_size != old_size)
                goto success;

        /* FIXME: Tailer is a bogus optimization, remove it. */
        /* zone bits tailer char is protected by EXPAND lock. */
        if (tdb->methods->read(tdb, old_size - 1, &zone_bits, 1) == -1)
                goto fail;

        /* If zones aren't working well, add larger zone if possible. */
        enlarge_zone = !zones_happy(tdb);

        /* New zone is at least zone_bits; it can be larger if we're on
         * the right boundary. */
        for (;;) {
                /* Does this fit the allocation comfortably? */
                if ((1ULL << zone_bits) >= overhead(zone_bits) + wanted) {
                        /* Only let enlarge_zone enlarge us once. */
                        if (!enlarge_zone)
                                break;
                        enlarge_zone = false;
                }
                if ((old_size - 1 - sizeof(struct tdb_header))
                    & (1ULL << zone_bits))
                        break;
                zone_bits++;
        }

        zhdr.zone_bits = zone_bits;
        num_buckets = BUCKETS_FOR_ZONE(zone_bits);

        /* FIXME: I don't think we need to expand to full zone, do we? */
        if (tdb->methods->expand_file(tdb, 1ULL << zone_bits) == -1)
                goto fail;

        /* Write new tailer. */
        if (tdb->methods->write(tdb, tdb->map_size - 1, &zone_bits, 1) == -1)
                goto fail;

        /* Write new zone header (where the old tailer byte was). */
        off = old_size - 1;
        if (tdb_write_convert(tdb, off, &zhdr, sizeof(zhdr)) == -1)
                goto fail;

        /* Now write empty buckets. */
        off += sizeof(zhdr);
        if (zero_out(tdb, off, (num_buckets + 1) * sizeof(tdb_off_t)) == -1)
                goto fail;
        off += (num_buckets + 1) * sizeof(tdb_off_t);

        /* Now add the rest as our free record. */
        if (add_free_record(tdb, zone_bits, off, tdb->map_size - 1 - off) == -1)
                goto fail;

        /* Try allocating from this zone now. */
        tdb->zone_off = old_size - 1;
        tdb->zhdr = zhdr;

success:
        tdb_unlock_expand(tdb, F_WRLCK);
        return 0;

fail:
        tdb_unlock_expand(tdb, F_WRLCK);
        return -1;
}
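
/*
 * Editor's sketch of the file layout after an expansion (illustrative):
 *
 *   | tdb_header | older zones ... | zone hdr | buckets | free record | tailer |
 *                                   ^ old_size - 1                     ^ map_size - 1
 *
 * The new zone header lands where the old 1-byte tailer sat, so zones
 * keep tiling on power-of-2 boundaries after the tdb_header, which is
 * what zone_off()'s masking relies on.
 */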

static tdb_len_t adjust_size(size_t keylen, size_t datalen, bool growing)
{
        tdb_len_t size = keylen + datalen;

        if (size < TDB_MIN_DATA_LEN)
                size = TDB_MIN_DATA_LEN;

        /* Overallocate if this is coming from an enlarging store. */
        if (growing)
                size += datalen / 2;

        /* Round to next uint64_t boundary. */
        return (size + (sizeof(uint64_t) - 1ULL)) & ~(sizeof(uint64_t) - 1ULL);
}
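
/*
 * Worked example (editor's sketch): keylen = 5, datalen = 10 gives
 * size = 15, rounded up to 16.  With growing = true it becomes
 * 15 + 10/2 = 20, rounded up to 24.  (Both assume TDB_MIN_DATA_LEN is
 * below these values; smaller requests are bumped up to it first.)
 */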

/* This won't fail: it will expand the database if it has to. */
tdb_off_t alloc(struct tdb_context *tdb, size_t keylen, size_t datalen,
                uint64_t hash, bool growing)
{
        tdb_off_t off;
        tdb_len_t size, actual;
        struct tdb_used_record rec;

        /* We can't hold pointers during this: we could unmap! */
        assert(!tdb->direct_access);

        size = adjust_size(keylen, datalen, growing);

again:
        off = get_free(tdb, size, &actual);
        if (unlikely(off == TDB_OFF_ERR))
                return off;

        if (unlikely(off == 0)) {
                if (tdb_expand(tdb, size) == -1)
                        return TDB_OFF_ERR;
                goto again;
        }

        /* Some supergiant values can't be encoded. */
        /* FIXME: Check before, and limit actual in get_free. */
        if (set_header(tdb, &rec, keylen, datalen, actual, hash,
                       tdb->zhdr.zone_bits) != 0) {
                add_free_record(tdb, tdb->zhdr.zone_bits, off,
                                sizeof(rec) + actual);
                return TDB_OFF_ERR;
        }

        if (tdb_write_convert(tdb, off, &rec, sizeof(rec)) != 0)
                return TDB_OFF_ERR;

        return off;
}
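
/*
 * Editor's usage sketch (hypothetical helper, not tdb2 API): a caller
 * would typically follow alloc() by writing the key and data bytes
 * after the tdb_used_record header that alloc() wrote; the key-then-
 * data layout here is an assumption, and error handling is trimmed.
 * TDB_FREE_EXAMPLES is a hypothetical guard; never built by default.
 */
#ifdef TDB_FREE_EXAMPLES
static tdb_off_t example_alloc_and_write(struct tdb_context *tdb,
                                         struct tdb_data key,
                                         struct tdb_data data,
                                         uint64_t hash)
{
        tdb_off_t off = alloc(tdb, key.dsize, data.dsize, hash, false);

        if (off == TDB_OFF_ERR)
                return TDB_OFF_ERR;
        /* Key bytes follow the header; data bytes follow the key. */
        if (tdb->methods->write(tdb, off + sizeof(struct tdb_used_record),
                                key.dptr, key.dsize) == -1
            || tdb->methods->write(tdb, off + sizeof(struct tdb_used_record)
                                   + key.dsize, data.dptr, data.dsize) == -1)
                return TDB_OFF_ERR;
        return off;
}
#endif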