+static bool starts_transaction(const struct op *op)
+{
+ return op->type == OP_TDB_TRANSACTION_START;
+}
+
+static bool in_transaction(const struct op op[], unsigned int i)
+{
+ return op[i].group_start && starts_transaction(&op[op[i].group_start]);
+}
+
+static bool successful_transaction(const struct op *op)
+{
+ return starts_transaction(op)
+ && op[op->group_len].type == OP_TDB_TRANSACTION_COMMIT;
+}
+
+static bool starts_traverse(const struct op *op)
+{
+ return op->type == OP_TDB_TRAVERSE_START
+ || op->type == OP_TDB_TRAVERSE_READ_START;
+}
+
+static bool in_traverse(const struct op op[], unsigned int i)
+{
+ return op[i].group_start && starts_traverse(&op[op[i].group_start]);
+}
+
+static bool starts_chainlock(const struct op *op)
+{
+ return op->type == OP_TDB_CHAINLOCK_READ
+ || op->type == OP_TDB_CHAINLOCK;
+}
+
+static bool in_chainlock(const struct op op[], unsigned int i)
+{
+ return op[i].group_start && starts_chainlock(&op[op[i].group_start]);
+}
+
/* What's the data after this op? pre if nothing changed.
 *
 * For a transaction or chainlock group, replays the whole group (by
 * recursing on each member op touching this key) to compute the value
 * at group end; a cancelled transaction leaves the value untouched. */
static const TDB_DATA *gives(const TDB_DATA *key, const TDB_DATA *pre,
			     const struct op *op)
{
	if (starts_transaction(op) || starts_chainlock(op)) {
		unsigned int i;

		/* Cancelled transactions don't change anything. */
		if (op[op->group_len].type == OP_TDB_TRANSACTION_CANCEL)
			return pre;
		/* Group must end in commit or chain unlock. */
		assert(op[op->group_len].type == OP_TDB_TRANSACTION_COMMIT
		       || op[op->group_len].type == OP_TDB_CHAINUNLOCK_READ
		       || op[op->group_len].type == OP_TDB_CHAINUNLOCK);

		for (i = 1; i < op->group_len; i++) {
			/* This skips nested transactions, too */
			if (key_eq(op[i].key, *key))
				pre = gives(key, pre, &op[i]);
		}
		return pre;
	}

	/* Failed ops don't change state of db. */
	if (op->ret < 0)
		return pre;

	/* Deletions (and wipes) leave no value behind. */
	if (op->type == OP_TDB_DELETE || op->type == OP_TDB_WIPE_ALL)
		return &tdb_null;

	/* Append: the recorded post-append value. */
	if (op->type == OP_TDB_APPEND)
		return &op->append.post;

	/* Store: the stored value. */
	if (op->type == OP_TDB_STORE)
		return &op->data;

	/* Reads and other non-modifying ops leave the value alone. */
	return pre;
}
+
/* Build a hash table mapping each key to the list of ops (as
 * file/op_num descriptors) which touch it.  Ops inside a transaction
 * or chainlock group are credited to the group's starting op. */
static struct keyinfo *hash_ops(struct op *op[], unsigned int num_ops[],
				unsigned int num)
{
	unsigned int i, j, h;
	struct keyinfo *hash;

	/* NOTE(review): assumes total_keys*2 slots never all fill, or
	 * the probe loop below would not terminate — confirm total_keys
	 * counts distinct keys. */
	hash = talloc_zero_array(op[0], struct keyinfo, total_keys*2);
	for (i = 0; i < num; i++) {
		for (j = 1; j < num_ops[i]; j++) {
			/* We can't do this on allocation, due to realloc. */
			list_head_init(&op[i][j].post);
			list_head_init(&op[i][j].pre);

			/* Keyless ops don't go in the table. */
			if (!op[i][j].key.dptr)
				continue;

			/* Linear probing: find this key's slot, or claim
			 * the first empty one. */
			h = hash_key(&op[i][j].key) % (total_keys * 2);
			while (!key_eq(hash[h].key, op[i][j].key)) {
				if (!hash[h].key.dptr) {
					hash[h].key = op[i][j].key;
					break;
				}
				h = (h + 1) % (total_keys * 2);
			}
			/* Might as well save some memory if we can. */
			if (op[i][j].key.dptr != hash[h].key.dptr) {
				talloc_free(op[i][j].key.dptr);
				op[i][j].key.dptr = hash[h].key.dptr;
			}
			/* Grow the user list by one (the slot may go unused
			 * if the "continue" below fires; that's harmless). */
			hash[h].user = talloc_realloc(hash, hash[h].user,
						      struct op_desc,
						      hash[h].num_users+1);

			/* If it's in a transaction, it's the transaction which
			 * matters from an analysis POV. */
			if (in_transaction(op[i], j)
			    || in_chainlock(op[i], j)) {
				unsigned start = op[i][j].group_start;

				/* Don't include twice. */
				if (hash[h].num_users
				    && hash[h].user[hash[h].num_users-1].file
				    == i
				    && hash[h].user[hash[h].num_users-1].op_num
				    == start)
					continue;

				hash[h].user[hash[h].num_users].op_num = start;
			} else
				hash[h].user[hash[h].num_users].op_num = j;
			hash[h].user[hash[h].num_users].file = i;
			hash[h].num_users++;
		}
	}

	return hash;
}
+
+static bool satisfies(const TDB_DATA *key, const TDB_DATA *data,
+ const struct op *op)
+{
+ const TDB_DATA *need = NULL;
+
+ if (starts_transaction(op) || starts_chainlock(op)) {
+ unsigned int i;
+
+ /* Look through for an op in this transaction which
+ * needs this key. */
+ for (i = 1; i < op->group_len; i++) {
+ if (key_eq(op[i].key, *key)) {
+ need = needs(&op[i]);
+ /* tdb_exists() is special: there might be
+ * something in the transaction with more
+ * specific requirements. Other ops don't have
+ * specific requirements (eg. store or delete),
+ * but they change the value so we can't get
+ * more information from future ops. */
+ if (op[i].type != OP_TDB_EXISTS)
+ break;
+ }
+ }
+ } else
+ need = needs(op);
+
+ /* Don't need anything? Cool. */
+ if (!need)
+ return true;
+
+ /* This should be tdb_null or a real value. */
+ assert(data != &must_exist);
+ assert(data != &must_not_exist);
+ assert(data != ¬_exists_or_empty);
+
+ /* Must not exist? data must not exist. */
+ if (need == &must_not_exist)
+ return data == &tdb_null;
+
+ /* Must exist? */
+ if (need == &must_exist)
+ return data != &tdb_null;
+
+ /* Either noexist or empty. */
+ if (need == ¬_exists_or_empty)
+ return data->dsize == 0;
+
+ /* Needs something specific. */
+ return key_eq(*data, *need);
+}
+
+static void move_to_front(struct op_desc res[], unsigned off, unsigned elem)
+{
+ if (elem != off) {
+ struct op_desc tmp = res[elem];
+ memmove(res + off + 1, res + off, (elem - off)*sizeof(res[0]));
+ res[off] = tmp;
+ }
+}
+
+static void restore_to_pos(struct op_desc res[], unsigned off, unsigned elem)
+{
+ if (elem != off) {
+ struct op_desc tmp = res[off];
+ memmove(res + off, res + off + 1, (elem - off)*sizeof(res[0]));
+ res[elem] = tmp;
+ }
+}
+
/* Backtracking search: order res[off..num-1] (ops on one key) so each
 * op's precondition is met by the value left behind by its
 * predecessors.  "data" is the key's value before res[off].  Returns
 * true if a complete valid ordering was found (res is permuted in
 * place); false means backtrack. */
static bool sort_deps(char *filename[], struct op *op[],
		      struct op_desc res[],
		      unsigned off, unsigned num,
		      const TDB_DATA *key, const TDB_DATA *data,
		      unsigned num_files, unsigned fuzz)
{
	unsigned int i, files_done;
	struct op *this_op;
	bool done[num_files];

	/* None left? We're sorted. */
	if (off == num)
		return true;

	/* Does this make sequence number go backwards? Allow a little fuzz. */
	if (off > 0) {
		int seqnum1 = op[res[off-1].file][res[off-1].op_num].seqnum;
		int seqnum2 = op[res[off].file][res[off].op_num].seqnum;

		if (seqnum1 - seqnum2 > (int)fuzz) {
#if DEBUG_DEPS
			printf("Seqnum jump too far (%u -> %u)\n",
			       seqnum1, seqnum2);
#endif
			return false;
		}
	}

	memset(done, 0, sizeof(done));

	/* Since ops within a trace file are ordered, we just need to figure
	 * out which file to try next.  Since we don't take into account
	 * inter-key relationships (which exist by virtue of trace file order),
	 * we minimize the chance of harm by trying to keep in seqnum order. */
	for (files_done = 0, i = off; i < num && files_done < num_files; i++) {
		/* Only the first untried op from each file is a candidate. */
		if (done[res[i].file])
			continue;

		this_op = &op[res[i].file][res[i].op_num];

		/* Is what we have good enough for this op? */
		if (satisfies(key, data, this_op)) {
			/* Try it in this slot and recurse on the rest... */
			move_to_front(res, off, i);
			if (sort_deps(filename, op, res, off+1, num,
				      key, gives(key, data, this_op),
				      num_files, fuzz))
				return true;
			/* ...and undo the move if that failed. */
			restore_to_pos(res, off, i);
		}
		done[res[i].file] = true;
		files_done++;
	}

	/* No combination worked. */
	return false;
}
+
+static void check_dep_sorting(struct op_desc user[], unsigned num_users,
+ unsigned num_files)
+{
+#if DEBUG_DEPS
+ unsigned int i;
+ unsigned minima[num_files];
+
+ memset(minima, 0, sizeof(minima));
+ for (i = 0; i < num_users; i++) {
+ assert(minima[user[i].file] < user[i].op_num);
+ minima[user[i].file] = user[i].op_num;
+ }
+#endif
+}
+
/* All these ops happen on the same key.  Which comes first?
 *
 * This can happen both because read ops or failed write ops don't
 * change sequence number, and also due to race since we access the
 * number unlocked (the race can cause less detectable ordering problems,
 * in which case we'll deadlock and report: fix manually in that case).
 */
static void figure_deps(char *filename[], struct op *op[],
			const TDB_DATA *key, struct op_desc user[],
			unsigned num_users, unsigned num_files)
{
	/* We assume database starts empty. */
	const struct TDB_DATA *data = &tdb_null;
	unsigned int fuzz;

	/* We prefer to keep strict seqnum order if possible: it's the
	 * most likely.  We get more lax if that fails.
	 * (fuzz takes the values 0, 2, 6, 14, 30, 62 before giving up.) */
	for (fuzz = 0; fuzz < 100; fuzz = (fuzz + 1)*2) {
		if (sort_deps(filename, op, user, 0, num_users, key, data,
			      num_files, fuzz))
			break;
	}

	/* No ordering worked even with maximum fuzz: give up.
	 * (op_num+1 presumably maps to a 1-based trace line — verify.) */
	if (fuzz >= 100)
		fail(filename[user[0].file], user[0].op_num+1,
		     "Could not resolve inter-dependencies");

	check_dep_sorting(user, num_users, num_files);
}
+
/* For every key bucket, sort its user ops into a plausible execution
 * order (by seqnum), then let figure_deps() finalize the order. */
static void sort_ops(struct keyinfo hash[], char *filename[], struct op *op[],
		     unsigned int num)
{
	unsigned int h;

	/* Gcc nested function extension.  How cool is this? */
	int compare_seqnum(const void *_a, const void *_b)
	{
		const struct op_desc *a = _a, *b = _b;

		/* First, maintain order within any trace file. */
		if (a->file == b->file)
			return a->op_num - b->op_num;

		/* Otherwise, arrange by seqnum order. */
		if (op[a->file][a->op_num].seqnum !=
		    op[b->file][b->op_num].seqnum)
			return op[a->file][a->op_num].seqnum
				- op[b->file][b->op_num].seqnum;

		/* Cancelled transactions are assumed to happen first. */
		if (starts_transaction(&op[a->file][a->op_num])
		    && !successful_transaction(&op[a->file][a->op_num]))
			return -1;
		if (starts_transaction(&op[b->file][b->op_num])
		    && !successful_transaction(&op[b->file][b->op_num]))
			return 1;

		/* No idea. */
		return 0;
	}

	/* Now sort into seqnum order. */
	for (h = 0; h < total_keys * 2; h++) {
		struct op_desc *user = hash[h].user;

		qsort(user, hash[h].num_users, sizeof(user[0]), compare_seqnum);
		figure_deps(filename, op, &hash[h].key, user, hash[h].num_users,
			    num);
	}
}
+
+static int destroy_depend(struct depend *dep)
+{
+ list_del(&dep->pre_list);
+ list_del(&dep->post_list);
+ return 0;
+}
+
+static void add_dependency(void *ctx,
+ struct op *op[],
+ char *filename[],
+ const struct op_desc *needs,
+ const struct op_desc *prereq)
+{
+ struct depend *dep;
+
+ /* We don't depend on ourselves. */
+ if (needs->file == prereq->file) {
+ assert(prereq->op_num < needs->op_num);
+ return;
+ }
+
+#if DEBUG_DEPS
+ printf("%s:%u: depends on %s:%u\n",
+ filename[needs->file], needs->op_num+1,
+ filename[prereq->file], prereq->op_num+1);
+#endif
+
+ dep = talloc(ctx, struct depend);
+ dep->needs = *needs;
+ dep->prereq = *prereq;
+
+#if TRAVERSALS_TAKE_TRANSACTION_LOCK
+ /* If something in a traverse depends on something in another
+ * traverse/transaction, it creates a dependency between the
+ * two groups. */
+ if ((in_traverse(op[prereq->file], prereq->op_num)
+ && (starts_transaction(&op[needs->file][needs->op_num])
+ || starts_traverse(&op[needs->file][needs->op_num])))
+ || (in_traverse(op[needs->file], needs->op_num)
+ && (starts_transaction(&op[prereq->file][prereq->op_num])
+ || starts_traverse(&op[prereq->file][prereq->op_num])))) {
+ unsigned int start;
+
+ /* We are satisfied by end of group. */
+ start = op[prereq->file][prereq->op_num].group_start;
+ dep->prereq.op_num = start + op[prereq->file][start].group_len;
+ /* And we need that done by start of our group. */
+ dep->needs.op_num = op[needs->file][needs->op_num].group_start;
+ }
+
+ /* There is also this case:
+ * <traverse> <read foo> ...
+ * <transaction> ... </transaction> <create foo>
+ * Where if we start the traverse then wait, we could block
+ * the transaction and deadlock.
+ *
+ * We try to address this by ensuring that where seqnum indicates it's
+ * possible, we wait for <create foo> before *starting* traverse.
+ */
+ else if (in_traverse(op[needs->file], needs->op_num)) {
+ struct op *need = &op[needs->file][needs->op_num];
+ if (op[needs->file][need->group_start].seqnum >
+ op[prereq->file][prereq->op_num].seqnum) {
+ dep->needs.op_num = need->group_start;
+ }
+ }
+#endif
+
+ /* If you depend on a transaction or chainlock, you actually
+ * depend on it ending. */
+ if (starts_transaction(&op[prereq->file][dep->prereq.op_num])
+ || starts_chainlock(&op[prereq->file][dep->prereq.op_num])) {
+ dep->prereq.op_num
+ += op[dep->prereq.file][dep->prereq.op_num].group_len;
+#if DEBUG_DEPS
+ printf("-> Actually end of transaction %s:%u\n",
+ filename[dep->prereq->file], dep->prereq->op_num+1);
+#endif
+ } else
+ /* We should never create a dependency from middle of
+ * a transaction. */
+ assert(!in_transaction(op[prereq->file], dep->prereq.op_num)
+ || op[prereq->file][dep->prereq.op_num].type
+ == OP_TDB_TRANSACTION_COMMIT
+ || op[prereq->file][dep->prereq.op_num].type
+ == OP_TDB_TRANSACTION_CANCEL);
+
+ list_add(&op[dep->prereq.file][dep->prereq.op_num].post,
+ &dep->post_list);
+ list_add(&op[dep->needs.file][dep->needs.op_num].pre,
+ &dep->pre_list);
+ talloc_set_destructor(dep, destroy_depend);
+}
+
+static bool changes_db(const TDB_DATA *key, const struct op *op)
+{
+ return gives(key, NULL, op) != NULL;
+}
+
/* Make user[i] (a db-changing op) depend on the previous changer
 * user[prev], plus the last intervening read-only op from each other
 * file (one per file suffices: ops within a file are already ordered). */
static void depend_on_previous(struct op *op[],
			       char *filename[],
			       unsigned int num,
			       struct op_desc user[],
			       unsigned int i,
			       int prev)
{
	bool deps[num];
	int j;

	/* First op for this key: nothing to depend on. */
	if (i == 0)
		return;

	if (prev == i - 1) {
		/* Just depend on previous. */
		add_dependency(NULL, op, filename, &user[i], &user[prev]);
		return;
	}

	/* We have to wait for the readers. Find last one in *each* file. */
	memset(deps, 0, sizeof(deps));
	deps[user[i].file] = true;
	for (j = i - 1; j > prev; j--) {
		if (!deps[user[j].file]) {
			add_dependency(NULL, op, filename, &user[i], &user[j]);
			deps[user[j].file] = true;
		}
	}
}
+
/* This is simple, but not complete.  We don't take into account
 * indirect dependencies. */
static void optimize_dependencies(struct op *op[], unsigned int num_ops[],
				  unsigned int num)
{
	unsigned int i, j;

	/* There can only be one real dependency on each file */
	for (i = 0; i < num; i++) {
		for (j = 1; j < num_ops[i]; j++) {
			struct depend *dep, *next;
			struct depend *prev[num];

			memset(prev, 0, sizeof(prev));

			/* Keep only the latest prereq per file for this op;
			 * talloc_free unlinks the rest via destroy_depend. */
			list_for_each_safe(&op[i][j].pre, dep, next, pre_list) {
				if (!prev[dep->prereq.file]) {
					prev[dep->prereq.file] = dep;
					continue;
				}
				if (prev[dep->prereq.file]->prereq.op_num
				    < dep->prereq.op_num) {
					talloc_free(prev[dep->prereq.file]);
					prev[dep->prereq.file] = dep;
				} else
					talloc_free(dep);
			}
		}
	}

	/* Second pass: drop dependencies already implied by an earlier
	 * op in the same file waiting on the same (or a later) prereq. */
	for (i = 0; i < num; i++) {
		int deps[num];

		/* deps[f] = highest prereq op_num in file f seen so far. */
		for (j = 0; j < num; j++)
			deps[j] = -1;

		for (j = 1; j < num_ops[i]; j++) {
			struct depend *dep, *next;

			list_for_each_safe(&op[i][j].pre, dep, next, pre_list) {
				if (deps[dep->prereq.file]
				    >= (int)dep->prereq.op_num)
					talloc_free(dep);
				else
					deps[dep->prereq.file]
						= dep->prereq.op_num;
			}
		}
	}
}
+
+#if TRAVERSALS_TAKE_TRANSACTION_LOCK
+/* Force an order among the traversals, so they don't deadlock (as much) */
/* Force an order among the traversals, so they don't deadlock (as much) */
static void make_traverse_depends(char *filename[],
				  struct op *op[], unsigned int num_ops[],
				  unsigned int num)
{
	unsigned int i, num_traversals = 0;
	int j;
	struct op_desc *desc;

	/* Sort by which one runs first. */
	int compare_traverse_desc(const void *_a, const void *_b)
	{
		const struct op_desc *da = _a, *db = _b;
		const struct op *a = &op[da->file][da->op_num],
			*b = &op[db->file][db->op_num];

		if (a->seqnum != b->seqnum)
			return a->seqnum - b->seqnum;

		/* If they have same seqnum, it means one didn't make any
		 * changes.  Thus sort by end in that case. */
		return a[a->group_len].seqnum - b[b->group_len].seqnum;
	}

	/* Start with a 1-element array; grown below as we find traverses. */
	desc = talloc_array(NULL, struct op_desc, 1);

	/* Count them. */
	for (i = 0; i < num; i++) {
		for (j = 1; j < num_ops[i]; j++) {
			/* Traverse start (ignore those in
			 * transactions; they're already covered by
			 * transaction dependencies). */
			if (starts_traverse(&op[i][j])
			    && !in_transaction(op[i], j)) {
				desc = talloc_realloc(NULL, desc,
						      struct op_desc,
						      num_traversals+1);
				desc[num_traversals].file = i;
				desc[num_traversals].op_num = j;
				num_traversals++;
			}
		}
	}
	qsort(desc, num_traversals, sizeof(desc[0]), compare_traverse_desc);

	/* Chain each traversal after the previous one's end. */
	for (i = 1; i < num_traversals; i++) {
		const struct op *prev = &op[desc[i-1].file][desc[i-1].op_num];
		const struct op *curr = &op[desc[i].file][desc[i].op_num];

		/* Read traverses don't depend on each other (read lock). */
		if (prev->type == OP_TDB_TRAVERSE_READ_START
		    && curr->type == OP_TDB_TRAVERSE_READ_START)
			continue;

		/* Only make dependency if it's clear. */
		if (compare_traverse_desc(&desc[i], &desc[i-1])) {
			/* i depends on end of traverse i-1. */
			struct op_desc end = desc[i-1];
			end.op_num += prev->group_len;
			add_dependency(NULL, op, filename, &desc[i], &end);
		}
	}
	talloc_free(desc);
}
+
/* Put fd into non-blocking mode; exits via err() on failure. */
static void set_nonblock(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) != 0)
		err(1, "Setting pipe nonblocking");
}
+
/* A child reported (op_desc written to the pipe) that it backed off a
 * traverse at desc.op_num to avoid deadlock.  Rewrite that file's op
 * stream so the replay won't block there.  Returns true if anything
 * was handled. */
static bool handle_backoff(struct op *op[], int fd)
{
	struct op_desc desc;
	bool handled = false;

	/* Sloppy coding: we assume PIPEBUF never fills. */
	while (read(fd, &desc, sizeof(desc)) != -1) {
		unsigned int i;
		handled = true;
		/* Walk backwards to find the traverse this op is in. */
		for (i = desc.op_num; i > 0; i--) {
			if (op[desc.file][i].type == OP_TDB_TRAVERSE) {
				/* We insert a fake end here. */
				op[desc.file][i].type
					= OP_TDB_TRAVERSE_END_EARLY;
				break;
			} else if (starts_traverse(&op[desc.file][i])) {
				unsigned int start = i;
				struct op tmp = op[desc.file][i];
				/* Move the ops outside traverse. */
				memmove(&op[desc.file][i],
					&op[desc.file][i+1],
					(desc.op_num-i-1) * sizeof(op[0][0]));
				op[desc.file][desc.op_num] = tmp;
				/* Re-point former group members at the
				 * traverse start's new position. */
				while (op[desc.file][i].group_start == start) {
					op[desc.file][i++].group_start
						= desc.op_num;
				}
				break;
			}
		}
	}
	return handled;
}
+
+#else /* !TRAVERSALS_TAKE_TRANSACTION_LOCK */
/* Without traversal locking there is never any backoff to handle. */
static bool handle_backoff(struct op *op[], int fd)
{
	return false;
}
+#endif
+
/* Top level of the analysis: work out, for every key, which ops in
 * which trace files must wait for which others, and attach the
 * resulting depend structures to the ops. */
static void derive_dependencies(char *filename[],
				struct op *op[], unsigned int num_ops[],
				unsigned int num)
{
	struct keyinfo *hash;
	unsigned int h, i;

	/* Create hash table for faster key lookup. */
	hash = hash_ops(op, num_ops, num);

	/* Sort them by sequence number. */
	sort_ops(hash, filename, op, num);

	/* Create dependencies back to the last change, rather than
	 * creating false dependencies by naively making each one
	 * depend on the previous.  This has two purposes: it makes
	 * later optimization simpler, and it also avoids deadlock with
	 * same sequence number ops inside traversals (if one
	 * traversal doesn't write anything, two ops can have the same
	 * sequence number yet we can create a traversal dependency
	 * the other way). */
	for (h = 0; h < total_keys * 2; h++) {
		int prev = -1;

		/* A key with fewer than two users needs no ordering. */
		if (hash[h].num_users < 2)
			continue;

		for (i = 0; i < hash[h].num_users; i++) {
			if (changes_db(&hash[h].key, &op[hash[h].user[i].file]
				       [hash[h].user[i].op_num])) {
				/* Writer: waits for last writer + readers. */
				depend_on_previous(op, filename, num,
						   hash[h].user, i, prev);
				prev = i;
			} else if (prev >= 0)
				/* Reader: just waits for the last writer. */
				add_dependency(hash, op, filename,
					       &hash[h].user[i],
					       &hash[h].user[prev]);
		}
	}

#if TRAVERSALS_TAKE_TRANSACTION_LOCK
	make_traverse_depends(filename, op, num_ops, num);
#endif

	optimize_dependencies(op, num_ops, num);
}
+
+static struct timeval run_test(char *argv[],
+ unsigned int num_ops[],
+ unsigned int hashsize[],
+ unsigned int tdb_flags[],
+ unsigned int open_flags[],
+ struct op *op[],
+ int fds[2])
+{
+ unsigned int i;
+ struct timeval start, end, diff;
+ bool ok = true;
+
+ for (i = 0; argv[i+2]; i++) {
+ struct tdb_context *tdb;
+ char c;
+
+ switch (fork()) {
+ case -1:
+ err(1, "fork failed");
+ case 0:
+ close(fds[1]);
+ tdb = tdb_open_ex(argv[1], hashsize[i],
+ tdb_flags[i]|TDB_NOSYNC,
+ open_flags[i], 0600, NULL, hash_key);
+ if (!tdb)
+ err(1, "Opening tdb %s", argv[1]);
+
+ /* This catches parent exiting. */
+ if (read(fds[0], &c, 1) != 1)
+ exit(1);
+ run_ops(tdb, pipes[i].fd[0], argv+2, op, i, 1,
+ num_ops[i], false);
+ check_deps(argv[2+i], op[i], num_ops[i]);
+ exit(0);
+ default:
+ break;
+ }
+ }
+
+ /* Let everything settle. */
+ sleep(1);
+
+ printf("Starting run...");
+ fflush(stdout);