X-Git-Url: http://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Ftdb2%2Ftdb.c;h=dfa68451e51a878d4be75f5431121ff1f012b9d4;hp=7c8d3cd764fc4021f9cfa3d174e6f4d004116edc;hb=a42bba8ec446284256a7c9146ba3525404de474c;hpb=25ed8785693e98492c0c516a8845866ec784f2b9

diff --git a/ccan/tdb2/tdb.c b/ccan/tdb2/tdb.c
index 7c8d3cd7..dfa68451 100644
--- a/ccan/tdb2/tdb.c
+++ b/ccan/tdb2/tdb.c
@@ -1,9 +1,7 @@
 #include "private.h"
 #include 
-#include 
-#include 
-#include 
 #include 
+#include 
 
 /* The null return. */
 struct tdb_data tdb_null = { .dptr = NULL, .dsize = 0 };
@@ -11,58 +9,6 @@ struct tdb_data tdb_null = { .dptr = NULL, .dsize = 0 };
 /* all contexts, to ensure no double-opens (fcntl locks don't nest!) */
 static struct tdb_context *tdbs = NULL;
 
-PRINTF_ATTRIBUTE(4, 5) static void
-null_log_fn(struct tdb_context *tdb,
-	    enum tdb_debug_level level, void *priv,
-	    const char *fmt, ...)
-{
-}
-
-/* We do a lot of work assuming our copy of the header volatile area
- * is uptodate, and usually it is. However, once we grab a lock, we have to
- * re-check it. */
-bool header_changed(struct tdb_context *tdb)
-{
-	uint64_t gen;
-
-	if (!(tdb->flags & TDB_NOLOCK) && tdb->header_uptodate) {
-		tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
-			 "warning: header uptodate already\n");
-	}
-
-	/* We could get a partial update if we're not holding any locks. */
-	assert((tdb->flags & TDB_NOLOCK) || tdb_has_locks(tdb));
-
-	tdb->header_uptodate = true;
-	gen = tdb_read_off(tdb, offsetof(struct tdb_header, v.generation));
-	if (unlikely(gen != tdb->header.v.generation)) {
-		tdb_read_convert(tdb, offsetof(struct tdb_header, v),
-				 &tdb->header.v, sizeof(tdb->header.v));
-		return true;
-	}
-	return false;
-}
-
-int write_header(struct tdb_context *tdb)
-{
-	assert(tdb_read_off(tdb, offsetof(struct tdb_header, v.generation))
-	       == tdb->header.v.generation);
-	tdb->header.v.generation++;
-	return tdb_write_convert(tdb, offsetof(struct tdb_header, v),
-				 &tdb->header.v, sizeof(tdb->header.v));
-}
-
-static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
-			     void *arg)
-{
-	return hash64_stable((const unsigned char *)key, length, seed);
-}
-
-uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
-{
-	return tdb->khash(ptr, len, tdb->header.hash_seed, tdb->hash_priv);
-}
-
 static bool tdb_already_open(dev_t device, ino_t ino)
 {
 	struct tdb_context *i;
@@ -85,8 +31,8 @@ static uint64_t random_number(struct tdb_context *tdb)
 	fd = open("/dev/urandom", O_RDONLY);
 	if (fd >= 0) {
 		if (tdb_read_all(fd, &ret, sizeof(ret))) {
-			tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
-				 "tdb_open: random from /dev/urandom\n");
+			tdb_logerr(tdb, TDB_SUCCESS, TDB_DEBUG_TRACE,
+				   "tdb_open: random from /dev/urandom");
 			close(fd);
 			return ret;
 		}
@@ -101,9 +47,9 @@ static uint64_t random_number(struct tdb_context *tdb)
 		char reply[1 + sizeof(uint64_t)];
 		int r = read(fd, reply, sizeof(reply));
 		if (r > 1) {
-			tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
-				 "tdb_open: %u random bytes from"
-				 " /dev/egd-pool\n", r-1);
+			tdb_logerr(tdb, TDB_SUCCESS, TDB_DEBUG_TRACE,
+				   "tdb_open: %u random bytes from"
+				   " /dev/egd-pool", r-1);
 			/* Copy at least some bytes. */
 			memcpy(&ret, reply+1, r - 1);
 			if (reply[0] == sizeof(uint64_t)
@@ -119,72 +65,65 @@ static uint64_t random_number(struct tdb_context *tdb)
 	/* Fallback: pid and time. */
 	gettimeofday(&now, NULL);
 	ret = getpid() * 100132289ULL + now.tv_sec * 1000000ULL + now.tv_usec;
-	tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
-		 "tdb_open: random from getpid and time\n");
+	tdb_logerr(tdb, TDB_SUCCESS, TDB_DEBUG_TRACE,
+		   "tdb_open: random from getpid and time");
 	return ret;
 }
 
 struct new_database {
 	struct tdb_header hdr;
-	struct tdb_used_record hrec;
-	tdb_off_t hash[1ULL << INITIAL_HASH_BITS];
-	struct tdb_used_record frec;
-	tdb_off_t free[INITIAL_FREE_BUCKETS + 1]; /* One overflow bucket */
+	struct tdb_freetable ftable;
 };
 
 /* initialise a new database */
-static int tdb_new_database(struct tdb_context *tdb)
+static int tdb_new_database(struct tdb_context *tdb,
+			    struct tdb_attribute_seed *seed,
+			    struct tdb_header *hdr)
 {
 	/* We make it up in memory, then write it out if not internal */
 	struct new_database newdb;
-	unsigned int magic_off = offsetof(struct tdb_header, magic_food);
+	unsigned int magic_len;
 
 	/* Fill in the header */
 	newdb.hdr.version = TDB_VERSION;
-	newdb.hdr.hash_seed = random_number(tdb);
+	if (seed)
+		newdb.hdr.hash_seed = seed->seed;
+	else
+		newdb.hdr.hash_seed = random_number(tdb);
 	newdb.hdr.hash_test = TDB_HASH_MAGIC;
 	newdb.hdr.hash_test = tdb->khash(&newdb.hdr.hash_test,
 					 sizeof(newdb.hdr.hash_test),
 					 newdb.hdr.hash_seed,
 					 tdb->hash_priv);
-
-	newdb.hdr.v.generation = 0;
-
-	/* The initial zone must cover the initial database size! */
-	BUILD_ASSERT((1ULL << INITIAL_ZONE_BITS) >= sizeof(newdb));
-
-	/* Free array has 1 zone, 10 buckets. All buckets empty. */
-	newdb.hdr.v.num_zones = 1;
-	newdb.hdr.v.zone_bits = INITIAL_ZONE_BITS;
-	newdb.hdr.v.free_buckets = INITIAL_FREE_BUCKETS;
-	newdb.hdr.v.free_off = offsetof(struct new_database, free);
-	set_header(tdb, &newdb.frec, 0,
-		   sizeof(newdb.free), sizeof(newdb.free), 0);
-	memset(newdb.free, 0, sizeof(newdb.free));
-
+	newdb.hdr.recovery = 0;
+	memset(newdb.hdr.reserved, 0, sizeof(newdb.hdr.reserved));
 	/* Initial hashes are empty. */
-	newdb.hdr.v.hash_bits = INITIAL_HASH_BITS;
-	newdb.hdr.v.hash_off = offsetof(struct new_database, hash);
-	set_header(tdb, &newdb.hrec, 0,
-		   sizeof(newdb.hash), sizeof(newdb.hash), 0);
-	memset(newdb.hash, 0, sizeof(newdb.hash));
+	memset(newdb.hdr.hashtable, 0, sizeof(newdb.hdr.hashtable));
+
+	/* Free is empty. */
+	newdb.hdr.free_table = offsetof(struct new_database, ftable);
+	memset(&newdb.ftable, 0, sizeof(newdb.ftable));
+	set_header(NULL, &newdb.ftable.hdr, TDB_FTABLE_MAGIC, 0,
+		   sizeof(newdb.ftable) - sizeof(newdb.ftable.hdr),
+		   sizeof(newdb.ftable) - sizeof(newdb.ftable.hdr), 0);
 
 	/* Magic food */
 	memset(newdb.hdr.magic_food, 0, sizeof(newdb.hdr.magic_food));
 	strcpy(newdb.hdr.magic_food, TDB_MAGIC_FOOD);
 
 	/* This creates an endian-converted database, as if read from disk */
+	magic_len = sizeof(newdb.hdr.magic_food);
 	tdb_convert(tdb,
-		    (char *)&newdb.hdr + magic_off,
-		    sizeof(newdb) - magic_off);
+		    (char *)&newdb.hdr + magic_len, sizeof(newdb) - magic_len);
 
-	tdb->header = newdb.hdr;
+	*hdr = newdb.hdr;
 
 	if (tdb->flags & TDB_INTERNAL) {
 		tdb->map_size = sizeof(newdb);
 		tdb->map_ptr = malloc(tdb->map_size);
 		if (!tdb->map_ptr) {
-			tdb->ecode = TDB_ERR_OOM;
+			tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_FATAL,
+				   "tdb_new_database: failed to allocate");
 			return -1;
 		}
 		memcpy(tdb->map_ptr, &newdb, tdb->map_size);
@@ -197,7 +136,9 @@ static int tdb_new_database(struct tdb_context *tdb)
 		return -1;
 
 	if (!tdb_pwrite_all(tdb->fd, &newdb, sizeof(newdb), 0)) {
-		tdb->ecode = TDB_ERR_IO;
+		tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_FATAL,
+			   "tdb_new_database: failed to write: %s",
+			   strerror(errno));
 		return -1;
 	}
 	return 0;
@@ -209,45 +150,64 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
 {
 	struct tdb_context *tdb;
 	struct stat st;
-	int save_errno;
+	int saved_errno = 0;
 	uint64_t hash_test;
 	unsigned v;
+	struct tdb_header hdr;
+	struct tdb_attribute_seed *seed = NULL;
 
 	tdb = malloc(sizeof(*tdb));
 	if (!tdb) {
 		/* Can't log this */
 		errno = ENOMEM;
-		goto fail;
+		return NULL;
 	}
 	tdb->name = NULL;
 	tdb->map_ptr = NULL;
+	tdb->direct_access = 0;
 	tdb->fd = -1;
-	/* map_size will be set below. */
+	tdb->map_size = sizeof(struct tdb_header);
 	tdb->ecode = TDB_SUCCESS;
-	/* header will be read in below. */
-	tdb->header_uptodate = false;
 	tdb->flags = tdb_flags;
-	tdb->log = null_log_fn;
-	tdb->log_priv = NULL;
-	tdb->khash = jenkins_hash;
-	tdb->hash_priv = NULL;
+	tdb->logfn = NULL;
 	tdb->transaction = NULL;
-	/* last_zone will be set below. */
+	tdb->stats = NULL;
+	tdb->access = NULL;
+	tdb_hash_init(tdb);
 	tdb_io_init(tdb);
 	tdb_lock_init(tdb);
 
-	/* FIXME */
-	if (attr) {
-		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
-			 "tdb_open: attributes not yet supported\n");
-		errno = EINVAL;
-		goto fail;
+	while (attr) {
+		switch (attr->base.attr) {
+		case TDB_ATTRIBUTE_LOG:
+			tdb->logfn = attr->log.log_fn;
+			tdb->log_private = attr->log.log_private;
+			break;
+		case TDB_ATTRIBUTE_HASH:
+			tdb->khash = attr->hash.hash_fn;
+			tdb->hash_priv = attr->hash.hash_private;
+			break;
+		case TDB_ATTRIBUTE_SEED:
+			seed = &attr->seed;
+			break;
+		case TDB_ATTRIBUTE_STATS:
+			tdb->stats = &attr->stats;
+			/* They have stats we don't know about? Tell them. */
+			if (tdb->stats->size > sizeof(attr->stats))
+				tdb->stats->size = sizeof(attr->stats);
+			break;
+		default:
+			tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_DEBUG_ERROR,
+				   "tdb_open: unknown attribute type %u",
+				   attr->base.attr);
+			goto fail;
+		}
+		attr = attr->base.next;
 	}
 
 	if ((open_flags & O_ACCMODE) == O_WRONLY) {
-		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
-			 "tdb_open: can't open tdb %s write-only\n", name);
-		errno = EINVAL;
+		tdb_logerr(tdb, TDB_ERR_EINVAL, TDB_DEBUG_ERROR,
+			   "tdb_open: can't open tdb %s write-only", name);
 		goto fail;
 	}
 
@@ -255,27 +215,31 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
 		tdb->read_only = true;
 		/* read only databases don't do locking */
 		tdb->flags |= TDB_NOLOCK;
-	} else
+		tdb->mmap_flags = PROT_READ;
+	} else {
 		tdb->read_only = false;
+		tdb->mmap_flags = PROT_READ | PROT_WRITE;
+	}
 
 	/* internal databases don't need any of the rest. */
 	if (tdb->flags & TDB_INTERNAL) {
 		tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
-		if (tdb_new_database(tdb) != 0) {
-			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
-				 "tdb_open: tdb_new_database failed!");
+		if (tdb_new_database(tdb, seed, &hdr) != 0) {
 			goto fail;
 		}
-		TEST_IT(tdb->flags & TDB_CONVERT);
-		tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
+		tdb_convert(tdb, &hdr.hash_seed, sizeof(hdr.hash_seed));
+		tdb->hash_seed = hdr.hash_seed;
+		tdb_ftable_init(tdb);
 		return tdb;
 	}
 
 	if ((tdb->fd = open(name, open_flags, mode)) == -1) {
-		tdb->log(tdb, TDB_DEBUG_WARNING, tdb->log_priv,
-			 "tdb_open: could not open file %s: %s\n",
-			 name, strerror(errno));
-		goto fail;	/* errno set by open(2) */
+		/* errno set by open(2) */
+		saved_errno = errno;
+		tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+			   "tdb_open: could not open file %s: %s",
+			   name, strerror(errno));
+		goto fail;
 	}
 
 	/* on exec, don't inherit the fd */
@@ -283,82 +247,115 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
 	fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC);
 
 	/* ensure there is only one process initialising at once */
-	if (tdb_lock_open(tdb) == -1) {
-		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
-			 "tdb_open: failed to get open lock on %s: %s\n",
-			 name, strerror(errno));
-		goto fail;	/* errno set by tdb_brlock */
+	if (tdb_lock_open(tdb, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK) == -1) {
+		/* errno set by tdb_brlock */
+		saved_errno = errno;
+		goto fail;
 	}
 
-	if (!tdb_pread_all(tdb->fd, &tdb->header, sizeof(tdb->header), 0)
-	    || strcmp(tdb->header.magic_food, TDB_MAGIC_FOOD) != 0) {
-		if (!(open_flags & O_CREAT) || tdb_new_database(tdb) == -1) {
-			if (errno == 0) {
-				errno = EIO; /* ie bad format or something */
-			}
+	if (!tdb_pread_all(tdb->fd, &hdr, sizeof(hdr), 0)
+	    || strcmp(hdr.magic_food, TDB_MAGIC_FOOD) != 0) {
+		if (!(open_flags & O_CREAT)) {
+			tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+				   "tdb_open: %s is not a tdb file", name);
+			goto fail;
+		}
+		if (tdb_new_database(tdb, seed, &hdr) == -1) {
 			goto fail;
 		}
-	} else if (tdb->header.version != TDB_VERSION) {
-		if (tdb->header.version == bswap_64(TDB_VERSION))
+	} else if (hdr.version != TDB_VERSION) {
+		if (hdr.version == bswap_64(TDB_VERSION))
 			tdb->flags |= TDB_CONVERT;
 		else {
 			/* wrong version */
-			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
-				 "tdb_open: %s is unknown version 0x%llx\n",
-				 name, (long long)tdb->header.version);
-			errno = EIO;
+			tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+				   "tdb_open: %s is unknown version 0x%llx",
+				   name, (long long)hdr.version);
 			goto fail;
 		}
 	}
 
-	tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
+	tdb_convert(tdb, &hdr, sizeof(hdr));
+	tdb->hash_seed = hdr.hash_seed;
 
 	hash_test = TDB_HASH_MAGIC;
-	hash_test = tdb->khash(&hash_test, sizeof(hash_test),
-			       tdb->header.hash_seed, tdb->hash_priv);
-	if (tdb->header.hash_test != hash_test) {
+	hash_test = tdb_hash(tdb, &hash_test, sizeof(hash_test));
+	if (hdr.hash_test != hash_test) {
 		/* wrong hash variant */
-		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
-			 "tdb_open: %s uses a different hash function\n",
-			 name);
-		errno = EIO;
+		tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+			   "tdb_open: %s uses a different hash function",
+			   name);
 		goto fail;
 	}
 
-	if (fstat(tdb->fd, &st) == -1)
+	if (fstat(tdb->fd, &st) == -1) {
+		saved_errno = errno;
+		tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+			   "tdb_open: could not stat %s: %s",
+			   name, strerror(errno));
 		goto fail;
+	}
 
 	/* Is it already in the open list? If so, fail. */
 	if (tdb_already_open(st.st_dev, st.st_ino)) {
 		/* FIXME */
-		tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
-			 "tdb_open: %s (%d,%d) is already open in this process\n",
-			 name, (int)st.st_dev, (int)st.st_ino);
-		errno = EBUSY;
+		tdb_logerr(tdb, TDB_ERR_NESTING, TDB_DEBUG_ERROR,
+			   "tdb_open: %s (%d,%d) is already open in this"
+			   " process",
+			   name, (int)st.st_dev, (int)st.st_ino);
 		goto fail;
 	}
 
 	tdb->name = strdup(name);
 	if (!tdb->name) {
-		errno = ENOMEM;
+		tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_ERROR,
+			   "tdb_open: failed to allocate name");
 		goto fail;
 	}
 
-	tdb->map_size = st.st_size;
 	tdb->device = st.st_dev;
 	tdb->inode = st.st_ino;
-	tdb_mmap(tdb);
 	tdb_unlock_open(tdb);
-	tdb_zone_init(tdb);
+
+	/* This makes sure we have current map_size and mmap. */
+	tdb->methods->oob(tdb, tdb->map_size + 1, true);
+
+	/* Now it's fully formed, recover if necessary. */
+	if (tdb_needs_recovery(tdb) && tdb_lock_and_recover(tdb) == -1) {
+		goto fail;
+	}
+
+	if (tdb_ftable_init(tdb) == -1)
+		goto fail;
 
 	tdb->next = tdbs;
 	tdbs = tdb;
 	return tdb;
 
 fail:
-	save_errno = errno;
-
-	if (!tdb)
-		return NULL;
+	/* Map ecode to some logical errno. */
+	if (!saved_errno) {
+		switch (tdb->ecode) {
+		case TDB_ERR_CORRUPT:
+		case TDB_ERR_IO:
+			saved_errno = EIO;
+			break;
+		case TDB_ERR_LOCK:
+			saved_errno = EWOULDBLOCK;
+			break;
+		case TDB_ERR_OOM:
+			saved_errno = ENOMEM;
			break;
+		case TDB_ERR_EINVAL:
+			saved_errno = EINVAL;
+			break;
+		case TDB_ERR_NESTING:
+			saved_errno = EBUSY;
+			break;
+		default:
+			saved_errno = EINVAL;
+			break;
+		}
+	}
 
 #ifdef TDB_TRACE
 	close(tdb->tracefd);
@@ -372,145 +369,14 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
 	free((char *)tdb->name);
 	if (tdb->fd != -1)
 		if (close(tdb->fd) != 0)
-			tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
-				 "tdb_open: failed to close tdb->fd"
-				 " on error!\n");
+			tdb_logerr(tdb, TDB_ERR_IO, TDB_DEBUG_ERROR,
+				   "tdb_open: failed to close tdb->fd"
+				   " on error!");
 	free(tdb);
-	errno = save_errno;
+	errno = saved_errno;
 	return NULL;
 }
 
-static tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
-{
-	return tdb->header.v.hash_off
-		+ ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
-		   * sizeof(tdb_off_t));
-}
-
-/* Returns 0 if the entry is a zero (definitely not a match).
- * Returns a valid entry offset if it's a match. Fills in rec.
- * Otherwise returns TDB_OFF_ERR: keep searching. */
-static tdb_off_t entry_matches(struct tdb_context *tdb,
-			       uint64_t list,
-			       uint64_t hash,
-			       const struct tdb_data *key,
-			       struct tdb_used_record *rec)
-{
-	tdb_off_t off;
-	uint64_t keylen;
-	const unsigned char *rkey;
-
-	off = tdb_read_off(tdb, tdb->header.v.hash_off
-			   + list * sizeof(tdb_off_t));
-	if (off == 0 || off == TDB_OFF_ERR)
-		return off;
-
-#if 0 /* FIXME: Check other bits. */
-	unsigned int bits, bitmask, hoffextra;
-	/* Bottom three bits show how many extra hash bits. */
-	bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1;
-	bitmask = (1 << bits)-1;
-	hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask);
-	uint64_t hextra = hash >> tdb->header.v.hash_bits;
-	if ((hextra & bitmask) != hoffextra)
-		return TDB_OFF_ERR;
-	off &= ~...;
-#endif
-
-	if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
-		return TDB_OFF_ERR;
-
-	/* FIXME: check extra bits in header! */
-	keylen = rec_key_length(rec);
-	if (keylen != key->dsize)
-		return TDB_OFF_ERR;
-
-	rkey = tdb_access_read(tdb, off + sizeof(*rec), keylen, false);
-	if (!rkey)
-		return TDB_OFF_ERR;
-	if (memcmp(rkey, key->dptr, keylen) != 0)
-		off = TDB_OFF_ERR;
-	tdb_access_release(tdb, rkey);
-	return off;
-}
-
-/* FIXME: Optimize? */
-static void unlock_lists(struct tdb_context *tdb,
-			 tdb_off_t list, tdb_len_t num,
-			 int ltype)
-{
-	tdb_off_t i;
-
-	for (i = list; i < list + num; i++)
-		tdb_unlock_list(tdb, i, ltype);
-}
-
-/* FIXME: Optimize? */
-static int lock_lists(struct tdb_context *tdb,
-		      tdb_off_t list, tdb_len_t num,
-		      int ltype)
-{
-	tdb_off_t i;
-
-	for (i = list; i < list + num; i++) {
-		if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT) != 0) {
-			unlock_lists(tdb, list, i - list, ltype);
-			return -1;
-		}
-	}
-	return 0;
-}
-
-/* We lock hashes up to the next empty offset. We already hold the
- * lock on the start bucket, but we may need to release and re-grab
- * it. If we fail, we hold no locks at all! */
-static tdb_len_t relock_hash_to_zero(struct tdb_context *tdb,
-				     tdb_off_t start, int ltype)
-{
-	tdb_len_t num, len, pre_locks;
-
-again:
-	num = 1ULL << tdb->header.v.hash_bits;
-	len = tdb_find_zero_off(tdb, hash_off(tdb, start), num - start);
-	if (unlikely(len == num - start)) {
-		/* We hit the end of the hash range. Drop lock: we have
-		   to lock start of hash first. */
-		tdb_unlock_list(tdb, start, ltype);
-		/* Grab something, so header is stable. */
-		if (tdb_lock_list(tdb, 0, ltype, TDB_LOCK_WAIT))
-			return TDB_OFF_ERR;
-		len = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
-		if (lock_lists(tdb, 1, len, ltype) == -1) {
-			tdb_unlock_list(tdb, 0, ltype);
-			return TDB_OFF_ERR;
-		}
-		pre_locks = len;
-		len = num - start;
-	} else {
-		/* We already have lock on start. */
-		start++;
-		pre_locks = 0;
-	}
-	if (unlikely(lock_lists(tdb, start, len, ltype) == -1)) {
-		if (pre_locks)
-			unlock_lists(tdb, 0, pre_locks, ltype);
-		else
-			tdb_unlock_list(tdb, start, ltype);
-		return TDB_OFF_ERR;
-	}
-
-	/* Now, did we lose the race, and it's not zero any more? */
-	if (unlikely(tdb_read_off(tdb, hash_off(tdb, pre_locks + len)) != 0)) {
-		unlock_lists(tdb, 0, pre_locks, ltype);
-		/* Leave the start locked, as expected. */
-		unlock_lists(tdb, start + 1, len - 1, ltype);
-		goto again;
-	}
-
-	return pre_locks + len;
-}
-
-/* FIXME: modify, don't rewrite! */
 static int update_rec_hdr(struct tdb_context *tdb,
 			  tdb_off_t off,
 			  tdb_len_t keylen,
@@ -518,127 +384,68 @@ static int update_rec_hdr(struct tdb_context *tdb,
 			  struct tdb_used_record *rec,
 			  uint64_t h)
 {
-	uint64_t room = rec_data_length(rec) + rec_extra_padding(rec);
+	uint64_t dataroom = rec_data_length(rec) + rec_extra_padding(rec);
 
-	if (set_header(tdb, rec, keylen, datalen, room - datalen, h))
+	if (set_header(tdb, rec, TDB_USED_MAGIC, keylen, datalen,
+		       keylen + dataroom, h))
 		return -1;
 
 	return tdb_write_convert(tdb, off, rec, sizeof(*rec));
 }
 
-/* If we fail, others will try after us. */
-static void enlarge_hash(struct tdb_context *tdb)
+/* Returns -1 on error, 0 on OK */
+static int replace_data(struct tdb_context *tdb,
+			struct hash_info *h,
+			struct tdb_data key, struct tdb_data dbuf,
+			tdb_off_t old_off, tdb_len_t old_room,
+			bool growing)
 {
-	tdb_off_t newoff, oldoff, i;
-	tdb_len_t hlen;
-	uint64_t h, num = 1ULL << tdb->header.v.hash_bits;
-	struct tdb_used_record pad, *r;
+	tdb_off_t new_off;
 
-	/* FIXME: We should do this without holding locks throughout. */
-	if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
-		return;
+	/* Allocate a new record. */
+	new_off = alloc(tdb, key.dsize, dbuf.dsize, h->h, TDB_USED_MAGIC,
+			growing);
+	if (unlikely(new_off == TDB_OFF_ERR))
+		return -1;
 
-	/* Someone else enlarged for us? Nothing to do. */
-	if ((1ULL << tdb->header.v.hash_bits) != num)
-		goto unlock;
-
-	/* Allocate our new array. */
-	hlen = num * sizeof(tdb_off_t) * 2;
-	newoff = alloc(tdb, 0, hlen, 0, false);
-	if (unlikely(newoff == TDB_OFF_ERR))
-		goto unlock;
-	if (unlikely(newoff == 0)) {
-		if (tdb_expand(tdb, 0, hlen, false) == -1)
-			goto unlock;
-		newoff = alloc(tdb, 0, hlen, 0, false);
-		if (newoff == TDB_OFF_ERR || newoff == 0)
-			goto unlock;
-	}
-	/* Step over record header! */
-	newoff += sizeof(struct tdb_used_record);
-
-	/* Starts all zero. */
-	if (zero_out(tdb, newoff, hlen) == -1)
-		goto unlock;
-
-	/* FIXME: If the space before is empty, we know this is in its ideal
-	 * location. Or steal a bit from the pointer to avoid rehash. */
-	for (i = tdb_find_nonzero_off(tdb, hash_off(tdb, 0), num);
-	     i < num;
-	     i += tdb_find_nonzero_off(tdb, hash_off(tdb, i), num - i)) {
-		tdb_off_t off;
-		off = tdb_read_off(tdb, hash_off(tdb, i));
-		if (unlikely(off == TDB_OFF_ERR))
-			goto unlock;
-		if (unlikely(!off)) {
-			tdb->ecode = TDB_ERR_CORRUPT;
-			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
-				 "find_bucket_and_lock: zero hash bucket!\n");
-			goto unlock;
-		}
-		h = hash_record(tdb, off);
-		/* FIXME: Encode extra hash bits! */
-		if (tdb_write_off(tdb, newoff
-				  + (h & ((num * 2) - 1)) * sizeof(uint64_t),
-				  off) == -1)
-			goto unlock;
+	/* We didn't like the existing one: remove it. */
+	if (old_off) {
+		add_stat(tdb, frees, 1);
+		add_free_record(tdb, old_off,
+				sizeof(struct tdb_used_record)
+				+ key.dsize + old_room);
+		if (replace_in_hash(tdb, h, new_off) == -1)
+			return -1;
+	} else {
+		if (add_to_hash(tdb, h, new_off) == -1)
+			return -1;
 	}
 
-	/* Free up old hash. */
-	oldoff = tdb->header.v.hash_off - sizeof(*r);
-	r = tdb_get(tdb, oldoff, &pad, sizeof(*r));
-	if (!r)
-		goto unlock;
-	add_free_record(tdb, oldoff,
-			sizeof(*r)+rec_data_length(r)+rec_extra_padding(r));
-
-	/* Now we write the modified header. */
-	tdb->header.v.hash_bits++;
-	tdb->header.v.hash_off = newoff;
-	write_header(tdb);
-unlock:
-	tdb_allrecord_unlock(tdb, F_WRLCK);
+	new_off += sizeof(struct tdb_used_record);
+	if (tdb->methods->write(tdb, new_off, key.dptr, key.dsize) == -1)
+		return -1;
+
+	new_off += key.dsize;
+	if (tdb->methods->write(tdb, new_off, dbuf.dptr, dbuf.dsize) == -1)
+		return -1;
+
+	/* FIXME: tdb_increment_seqnum(tdb); */
+	return 0;
 }
 
 int tdb_store(struct tdb_context *tdb,
 	      struct tdb_data key, struct tdb_data dbuf, int flag)
 {
-	tdb_off_t new_off, off, old_bucket, start, num_locks = 1;
+	struct hash_info h;
+	tdb_off_t off;
+	tdb_len_t old_room = 0;
 	struct tdb_used_record rec;
-	uint64_t h;
-	bool growing = false;
-
-	h = tdb_hash(tdb, key.dptr, key.dsize);
+	int ret;
 
-	/* FIXME: can we avoid locks for some fast paths? */
-	start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
-	if (start == TDB_OFF_ERR)
+	off = find_and_lock(tdb, key, F_WRLCK, &h, &rec, NULL);
+	if (unlikely(off == TDB_OFF_ERR))
 		return -1;
 
-	/* Fast path. */
-	old_bucket = start;
-	off = entry_matches(tdb, start, h, &key, &rec);
-	if (unlikely(off == TDB_OFF_ERR)) {
-		/* Slow path, need to grab more locks and search. */
-		tdb_off_t i;
-
-		/* Warning: this may drop the lock! Does that on error. */
-		num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
-		if (num_locks == TDB_OFF_ERR)
-			return -1;
-
-		for (i = start; i < start + num_locks; i++) {
-			off = entry_matches(tdb, i, h, &key, &rec);
-			/* Empty entry or we found it? */
-			if (off == 0 || off != TDB_OFF_ERR) {
-				old_bucket = i;
-				break;
-			}
-		}
-		if (i == start + num_locks)
-			off = 0;
-	}
-
 	/* Now we have lock on this hash bucket. */
 	if (flag == TDB_INSERT) {
 		if (off) {
@@ -647,18 +454,22 @@ int tdb_store(struct tdb_context *tdb,
 		}
 	} else {
 		if (off) {
-			if (rec_data_length(&rec) + rec_extra_padding(&rec)
-			    >= dbuf.dsize) {
-				new_off = off;
+			old_room = rec_data_length(&rec)
+				+ rec_extra_padding(&rec);
+			if (old_room >= dbuf.dsize) {
+				/* Can modify in-place. Easy! */
 				if (update_rec_hdr(tdb, off,
 						   key.dsize, dbuf.dsize,
-						   &rec, h))
+						   &rec, h.h))
+					goto fail;
+				if (tdb->methods->write(tdb, off + sizeof(rec)
+							+ key.dsize,
+							dbuf.dptr, dbuf.dsize))
 					goto fail;
-				goto write;
+				tdb_unlock_hashes(tdb, h.hlock_start,
+						  h.hlock_range, F_WRLCK);
+				return 0;
 			}
-			/* FIXME: See if right record is free? */
-			/* Hint to allocator that we've realloced. */
-			growing = true;
 		} else {
 			if (flag == TDB_MODIFY) {
 				/* if the record doesn't exist and we
@@ -670,210 +481,130 @@ int tdb_store(struct tdb_context *tdb,
 		}
 	}
 
-	/* Allocate a new record. */
-	new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
-	if (new_off == 0) {
-		unlock_lists(tdb, start, num_locks, F_WRLCK);
-		/* Expand, then try again... */
-		if (tdb_expand(tdb, key.dsize, dbuf.dsize, growing) == -1)
-			return -1;
-		return tdb_store(tdb, key, dbuf, flag);
-	}
+	/* If we didn't use the old record, this implies we're growing. */
+	ret = replace_data(tdb, &h, key, dbuf, off, old_room, off != 0);
+	tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
+	return ret;
+
+fail:
+	tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
+	return -1;
+}
+
+int tdb_append(struct tdb_context *tdb,
+	       struct tdb_data key, struct tdb_data dbuf)
+{
+	struct hash_info h;
+	tdb_off_t off;
+	struct tdb_used_record rec;
+	tdb_len_t old_room = 0, old_dlen;
+	unsigned char *newdata;
+	struct tdb_data new_dbuf;
+	int ret;
+
+	off = find_and_lock(tdb, key, F_WRLCK, &h, &rec, NULL);
+	if (unlikely(off == TDB_OFF_ERR))
+		return -1;
 
-	/* We didn't like the existing one: remove it. */
 	if (off) {
-		add_free_record(tdb, off, sizeof(struct tdb_used_record)
-				+ rec_key_length(&rec)
-				+ rec_data_length(&rec)
-				+ rec_extra_padding(&rec));
-	}
+		old_dlen = rec_data_length(&rec);
+		old_room = old_dlen + rec_extra_padding(&rec);
 
-write:
-	/* FIXME: Encode extra hash bits! */
-	if (tdb_write_off(tdb, hash_off(tdb, old_bucket), new_off) == -1)
-		goto fail;
+		/* Fast path: can append in place. */
+		if (rec_extra_padding(&rec) >= dbuf.dsize) {
+			if (update_rec_hdr(tdb, off, key.dsize,
+					   old_dlen + dbuf.dsize, &rec, h.h))
+				goto fail;
 
-	off = new_off + sizeof(struct tdb_used_record);
-	if (tdb->methods->write(tdb, off, key.dptr, key.dsize) == -1)
-		goto fail;
-	off += key.dsize;
-	if (tdb->methods->write(tdb, off, dbuf.dptr, dbuf.dsize) == -1)
-		goto fail;
+			off += sizeof(rec) + key.dsize + old_dlen;
+			if (tdb->methods->write(tdb, off, dbuf.dptr,
+						dbuf.dsize) == -1)
+				goto fail;
 
-	/* FIXME: tdb_increment_seqnum(tdb); */
-	unlock_lists(tdb, start, num_locks, F_WRLCK);
+			/* FIXME: tdb_increment_seqnum(tdb); */
+			tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range,
+					  F_WRLCK);
+			return 0;
+		}
 
-	/* FIXME: by simple simulation, this approximated 60% full.
-	 * Check in real case! */
-	if (unlikely(num_locks > 4 * tdb->header.v.hash_bits - 31))
-		enlarge_hash(tdb);
+		/* Slow path. */
+		newdata = malloc(key.dsize + old_dlen + dbuf.dsize);
+		if (!newdata) {
+			tdb_logerr(tdb, TDB_ERR_OOM, TDB_DEBUG_FATAL,
+				   "tdb_append: failed to allocate %zu bytes",
+				   (size_t)(key.dsize+old_dlen+dbuf.dsize));
+			goto fail;
+		}
+		if (tdb->methods->read(tdb, off + sizeof(rec) + key.dsize,
+				       newdata, old_dlen) != 0) {
+			free(newdata);
+			goto fail;
+		}
+		memcpy(newdata + old_dlen, dbuf.dptr, dbuf.dsize);
+		new_dbuf.dptr = newdata;
+		new_dbuf.dsize = old_dlen + dbuf.dsize;
+	} else {
+		newdata = NULL;
+		new_dbuf = dbuf;
+	}
 
-	return 0;
+	/* If they're using tdb_append(), it implies they're growing the
+	 * record. */
+	ret = replace_data(tdb, &h, key, new_dbuf, off, old_room, true);
+	tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
+	free(newdata);
+
+	return ret;
 
 fail:
-	unlock_lists(tdb, start, num_locks, F_WRLCK);
+	tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
 	return -1;
 }
 
 struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
 {
-	tdb_off_t off, start, num_locks = 1;
+	tdb_off_t off;
 	struct tdb_used_record rec;
-	uint64_t h;
+	struct hash_info h;
 	struct tdb_data ret;
 
-	h = tdb_hash(tdb, key.dptr, key.dsize);
-
-	/* FIXME: can we avoid locks for some fast paths? */
-	start = tdb_lock_list(tdb, h, F_RDLCK, TDB_LOCK_WAIT);
-	if (start == TDB_OFF_ERR)
+	off = find_and_lock(tdb, key, F_RDLCK, &h, &rec, NULL);
+	if (unlikely(off == TDB_OFF_ERR))
 		return tdb_null;
 
-	/* Fast path. */
-	off = entry_matches(tdb, start, h, &key, &rec);
-	if (unlikely(off == TDB_OFF_ERR)) {
-		/* Slow path, need to grab more locks and search. */
-		tdb_off_t i;
-
-		/* Warning: this may drop the lock! Does that on error. */
-		num_locks = relock_hash_to_zero(tdb, start, F_RDLCK);
-		if (num_locks == TDB_OFF_ERR)
-			return tdb_null;
-
-		for (i = start; i < start + num_locks; i++) {
-			off = entry_matches(tdb, i, h, &key, &rec);
-			/* Empty entry or we found it? */
-			if (off == 0 || off != TDB_OFF_ERR)
-				break;
-		}
-		if (i == start + num_locks)
-			off = 0;
-	}
-
 	if (!off) {
-		unlock_lists(tdb, start, num_locks, F_RDLCK);
 		tdb->ecode = TDB_ERR_NOEXIST;
-		return tdb_null;
+		ret = tdb_null;
+	} else {
+		ret.dsize = rec_data_length(&rec);
+		ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
+					  ret.dsize);
 	}
 
-	ret.dsize = rec_data_length(&rec);
-	ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
-				  ret.dsize);
-	unlock_lists(tdb, start, num_locks, F_RDLCK);
+	tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_RDLCK);
 	return ret;
 }
 
-static int hash_add(struct tdb_context *tdb, uint64_t h, tdb_off_t off)
-{
-	tdb_off_t i, hoff, len, num;
-
-	/* Look for next space. */
-	i = (h & ((1ULL << tdb->header.v.hash_bits) - 1));
-	len = (1ULL << tdb->header.v.hash_bits) - i;
-	num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);
-
-	if (unlikely(num == len)) {
-		/* We wrapped. Look through start of hash table. */
-		hoff = hash_off(tdb, 0);
-		len = (1ULL << tdb->header.v.hash_bits);
-		num = tdb_find_zero_off(tdb, hoff, len);
-		if (i == len) {
-			tdb->ecode = TDB_ERR_CORRUPT;
-			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
-				 "hash_add: full hash table!\n");
-			return -1;
-		}
-	}
-	/* FIXME: Encode extra hash bits! */
-	return tdb_write_off(tdb, hash_off(tdb, i + num), off);
-}
-
 int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
 {
-	tdb_off_t i, old_bucket, off, start, num_locks = 1;
+	tdb_off_t off;
 	struct tdb_used_record rec;
-	uint64_t h;
-
-	h = tdb_hash(tdb, key.dptr, key.dsize);
+	struct hash_info h;
 
-	/* FIXME: can we avoid locks for some fast paths? */
-	start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
-	if (start == TDB_OFF_ERR)
+	off = find_and_lock(tdb, key, F_WRLCK, &h, &rec, NULL);
+	if (unlikely(off == TDB_OFF_ERR))
 		return -1;
 
-	/* Fast path. */
-	old_bucket = start;
-	off = entry_matches(tdb, start, h, &key, &rec);
-	if (off && off != TDB_OFF_ERR) {
-		/* We can only really fastpath delete if next bucket
-		 * is 0. Note that we haven't locked it, but our lock
-		 * on this bucket stops anyone overflowing into it
-		 * while we look. */
-		if (tdb_read_off(tdb, hash_off(tdb, h+1)) == 0)
-			goto delete;
-		/* Slow path. */
-		off = TDB_OFF_ERR;
-	}
-
-	if (unlikely(off == TDB_OFF_ERR)) {
-		/* Slow path, need to grab more locks and search. */
-		tdb_off_t i;
-
-		/* Warning: this may drop the lock! Does that on error. */
-		num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
-		if (num_locks == TDB_OFF_ERR)
-			return -1;
-
-		for (i = start; i < start + num_locks; i++) {
-			off = entry_matches(tdb, i, h, &key, &rec);
-			/* Empty entry or we found it? */
-			if (off == 0 || off != TDB_OFF_ERR) {
-				old_bucket = i;
-				break;
-			}
-		}
-		if (i == start + num_locks)
-			off = 0;
-	}
-
 	if (!off) {
-		unlock_lists(tdb, start, num_locks, F_WRLCK);
+		tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
 		tdb->ecode = TDB_ERR_NOEXIST;
 		return -1;
 	}
 
-delete:
-	/* This actually unlinks it. */
-	if (tdb_write_off(tdb, hash_off(tdb, old_bucket), 0) == -1)
+	if (delete_from_hash(tdb, &h) == -1)
 		goto unlock_err;
 
-	/* Rehash anything following. */
-	for (i = hash_off(tdb, old_bucket+1);
-	     i != hash_off(tdb, h + num_locks);
-	     i += sizeof(tdb_off_t)) {
-		tdb_off_t off2;
-		uint64_t h2;
-
-		off2 = tdb_read_off(tdb, i);
-		if (unlikely(off2 == TDB_OFF_ERR))
-			goto unlock_err;
-
-		/* Maybe use a bit to indicate it is in ideal place? */
-		h2 = hash_record(tdb, off2);
-		/* Is it happy where it is? */
-		if (hash_off(tdb, h2) == i)
-			continue;
-
-		/* Remove it. */
-		if (tdb_write_off(tdb, i, 0) == -1)
-			goto unlock_err;
-
-		/* Rehash it. */
-		if (hash_add(tdb, h2, off2) == -1)
-			goto unlock_err;
-	}
-
 	/* Free the deleted entry. */
+	add_stat(tdb, frees, 1);
 	if (add_free_record(tdb, off,
 			    sizeof(struct tdb_used_record)
 			    + rec_key_length(&rec)
@@ -881,11 +612,11 @@ delete:
 			    + rec_extra_padding(&rec)) != 0)
 		goto unlock_err;
 
-	unlock_lists(tdb, start, num_locks, F_WRLCK);
+	tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
 	return 0;
 
 unlock_err:
-	unlock_lists(tdb, start, num_locks, F_WRLCK);
+	tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
 	return -1;
 }
 
@@ -894,12 +625,11 @@ int tdb_close(struct tdb_context *tdb)
 {
 	struct tdb_context **i;
 	int ret = 0;
-	/* FIXME:
+	tdb_trace(tdb, "tdb_close");
+
 	if (tdb->transaction) {
 		tdb_transaction_cancel(tdb);
 	}
-	*/
-	tdb_trace(tdb, "tdb_close");
 
 	if (tdb->map_ptr) {
 		if (tdb->flags & TDB_INTERNAL)
@@ -930,7 +660,60 @@ int tdb_close(struct tdb_context *tdb)
 	return ret;
 }
 
-enum TDB_ERROR tdb_error(struct tdb_context *tdb)
+enum TDB_ERROR tdb_error(const struct tdb_context *tdb)
 {
 	return tdb->ecode;
 }
+
+const char *tdb_errorstr(const struct tdb_context *tdb)
+{
+	/* Gcc warns if you miss a case in the switch, so use that. */
+	switch (tdb->ecode) {
+	case TDB_SUCCESS: return "Success";
+	case TDB_ERR_CORRUPT: return "Corrupt database";
+	case TDB_ERR_IO: return "IO Error";
+	case TDB_ERR_LOCK: return "Locking error";
+	case TDB_ERR_OOM: return "Out of memory";
+	case TDB_ERR_EXISTS: return "Record exists";
+	case TDB_ERR_NESTING: return "Transaction already started";
+	case TDB_ERR_EINVAL: return "Invalid parameter";
+	case TDB_ERR_NOEXIST: return "Record does not exist";
+	case TDB_ERR_RDONLY: return "Write not permitted";
+	}
+	return "Invalid error code";
+}
+
+void COLD tdb_logerr(struct tdb_context *tdb,
+		     enum TDB_ERROR ecode,
+		     enum tdb_debug_level level,
+		     const char *fmt, ...)
+{
+	char *message;
+	va_list ap;
+	size_t len;
+	/* tdb_open paths care about errno, so save it. */
+	int saved_errno = errno;
+
+	tdb->ecode = ecode;
+
+	if (!tdb->logfn)
+		return;
+
+	/* FIXME: Doesn't assume asprintf. */
+	va_start(ap, fmt);
+	len = vsnprintf(NULL, 0, fmt, ap);
+	va_end(ap);
+
+	message = malloc(len + 1);
+	if (!message) {
+		tdb->logfn(tdb, level, tdb->log_private,
+			   "out of memory formatting message");
+		return;
+	}
+	va_start(ap, fmt);
+	len = vsprintf(message, fmt, ap);
+	va_end(ap);
+	tdb->logfn(tdb, level, tdb->log_private, message);
+	free(message);
+	errno = saved_errno;
+}
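
A caller-side sketch of the attribute chain that the tdb_open() loop above walks. This is a minimal example, not part of the patch: it assumes the public tdb2 header declares union tdb_attribute with the base/log/seed members dereferenced above, and a log callback whose signature matches the tdb->logfn(tdb, level, tdb->log_private, message) call in tdb_logerr(); the header path, my_log, the file name, and the seed constant are illustrative only.

#include <ccan/tdb2/tdb2.h>	/* assumed public header */
#include <stdio.h>
#include <fcntl.h>

/* Shape matches the call tdb->logfn(tdb, level, tdb->log_private, message). */
static void my_log(struct tdb_context *tdb, enum tdb_debug_level level,
		   void *priv, const char *message)
{
	fprintf((FILE *)priv, "tdb(level %d): %s\n", (int)level, message);
}

int main(void)
{
	union tdb_attribute log, seed;
	struct tdb_context *tdb;

	log.base.attr = TDB_ATTRIBUTE_LOG;
	log.base.next = &seed;		/* attributes chain via base.next */
	log.log.log_fn = my_log;
	log.log.log_private = stderr;

	seed.base.attr = TDB_ATTRIBUTE_SEED;
	seed.base.next = NULL;
	seed.seed.seed = 0x123456789ULL; /* fixed hash_seed, as tdb_new_database() uses */

	tdb = tdb_open("demo.tdb", 0, O_RDWR|O_CREAT, 0600, &log);
	if (!tdb)
		return 1;	/* errno already mapped from tdb->ecode */
	tdb_close(tdb);
	return 0;
}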
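
And a store/fetch/delete round trip over the rewritten record paths (find_and_lock() plus replace_data() above), likewise a sketch rather than patch content. The struct tdb_data initialisers mirror the tdb_null definition at the top of the file; the flag semantics follow the tdb_store() branches (TDB_INSERT fails on an existing key, 0 replaces, TDB_MODIFY requires one). Freeing the fetched dptr assumes tdb_alloc_read() returns heap memory, as its name suggests.

#include <ccan/tdb2/tdb2.h>	/* assumed public header */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>

int main(void)
{
	struct tdb_data key = { .dptr = (unsigned char *)"hello", .dsize = 5 };
	struct tdb_data val = { .dptr = (unsigned char *)"world", .dsize = 5 };
	struct tdb_data out;
	struct tdb_context *tdb;

	tdb = tdb_open("demo.tdb", 0, O_RDWR|O_CREAT, 0600, NULL);
	if (!tdb)
		return 1;

	/* TDB_INSERT takes the add_to_hash() path; on a duplicate key it
	 * fails and tdb_error()/tdb_errorstr() report TDB_ERR_EXISTS. */
	if (tdb_store(tdb, key, val, TDB_INSERT) != 0)
		fprintf(stderr, "store: %s\n", tdb_errorstr(tdb));

	out = tdb_fetch(tdb, key);	/* tdb_null (dptr == NULL) if absent */
	if (out.dptr) {
		printf("%.*s\n", (int)out.dsize, (char *)out.dptr);
		free(out.dptr);		/* fetched data is ours to free */
	}

	tdb_delete(tdb, key);
	tdb_close(tdb);
	return 0;
}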