+ if (op->type == OP_TDB_STORE)
+ return &op->data;
+
+ return pre;
+}
+
+ /* Record that op (file, op_num) is a user of the key in hash slot h.
+ * Ops inside a transaction or chainlock are credited to their group's
+ * start op instead, and only recorded once per group. */
+ static void add_hash_user(struct keyinfo *hash,
+ unsigned int h,
+ struct op *op[],
+ unsigned int file,
+ unsigned int op_num)
+ {
+ hash[h].user = talloc_realloc(hash, hash[h].user,
+ struct op_desc, hash[h].num_users+1);
+
+ /* NOTE: the array is grown before the duplicate check below; if we
+ * bail out early the spare slot is simply reused by a later call. */
+
+ /* If it's in a transaction, it's the transaction which
+ * matters from an analysis POV. */
+ if (in_transaction(op[file], op_num)
+ || in_chainlock(op[file], op_num)) {
+ unsigned i;
+
+ /* Fold the whole group into its starting op. */
+ op_num = op[file][op_num].group_start;
+
+ /* Don't include twice. */
+ for (i = 0; i < hash[h].num_users; i++) {
+ if (hash[h].user[i].file == file
+ && hash[h].user[i].op_num == op_num)
+ return;
+ }
+ }
+ hash[h].user[hash[h].num_users].op_num = op_num;
+ hash[h].user[hash[h].num_users].file = file;
+ hash[h].num_users++;
+ }
+
+ /* Build an open-addressed hash table (size total_keys*2, linear
+ * probing) mapping every distinct key to the list of ops using it,
+ * across all @num trace files. Also unifies identical keys so they
+ * share a single dptr allocation. */
+ static struct keyinfo *hash_ops(struct op *op[], unsigned int num_ops[],
+ unsigned int num)
+ {
+ unsigned int i, j, h;
+ struct keyinfo *hash;
+
+ hash = talloc_zero_array(op[0], struct keyinfo, total_keys*2);
+ /* op index 0 is skipped throughout this file — presumably the
+ * open itself; TODO confirm against the trace reader. */
+ for (i = 0; i < num; i++) {
+ for (j = 1; j < num_ops[i]; j++) {
+ /* We can't do this on allocation, due to realloc. */
+ list_head_init(&op[i][j].post);
+ list_head_init(&op[i][j].pre);
+
+ /* Ops without a key don't participate. */
+ if (!op[i][j].key.dptr)
+ continue;
+
+ /* Linear probe until we find this key's slot or
+ * claim an empty one. */
+ h = hash_key(&op[i][j].key) % (total_keys * 2);
+ while (!key_eq(hash[h].key, op[i][j].key)) {
+ if (!hash[h].key.dptr) {
+ hash[h].key = op[i][j].key;
+ break;
+ }
+ h = (h + 1) % (total_keys * 2);
+ }
+ /* Might as well save some memory if we can. */
+ if (op[i][j].key.dptr != hash[h].key.dptr) {
+ talloc_free(op[i][j].key.dptr);
+ op[i][j].key.dptr = hash[h].key.dptr;
+ }
+
+ add_hash_user(hash, h, op, i, j);
+ }
+ }
+
+ /* Any wipe all entries need adding to all hash entries. */
+ for (h = 0; h < total_keys*2; h++) {
+ if (!hash[h].num_users)
+ continue;
+
+ for (i = 0; i < num_wipe_alls; i++)
+ add_hash_user(hash, h, op,
+ wipe_alls[i].file, wipe_alls[i].op_num);
+ }
+
+ return hash;
+ }
+
+ /* Can a key whose current value is @data (a real value or &tdb_null
+ * for "absent") satisfy @op's precondition, as reported by needs()?
+ * Fixed: "&not_exists_or_empty" had been corrupted by HTML-entity
+ * decoding into a literal U+00AC character ("¬_exists_or_empty"). */
+ static bool satisfies(const TDB_DATA *key, const TDB_DATA *data,
+ const struct op *op)
+ {
+ const TDB_DATA *need = needs(key, op);
+
+ /* Don't need anything? Cool. */
+ if (!need)
+ return true;
+
+ /* This should be tdb_null or a real value. */
+ assert(data != &must_exist);
+ assert(data != &must_not_exist);
+ assert(data != &not_exists_or_empty);
+
+ /* Must not exist? data must not exist. */
+ if (need == &must_not_exist)
+ return data == &tdb_null;
+
+ /* Must exist? */
+ if (need == &must_exist)
+ return data != &tdb_null;
+
+ /* Either noexist or empty. */
+ if (need == &not_exists_or_empty)
+ return data->dsize == 0;
+
+ /* Needs something specific. */
+ return key_eq(*data, *need);
+ }
+
+ /* Rotate res[off..elem] right by one slot, so res[elem] lands at
+ * res[off] and the displaced entries each move up one. No-op when
+ * elem == off. */
+ static void move_to_front(struct op_desc res[], unsigned off, unsigned elem)
+ {
+ struct op_desc moved;
+ unsigned i;
+
+ if (elem == off)
+ return;
+
+ moved = res[elem];
+ for (i = elem; i > off; i--)
+ res[i] = res[i-1];
+ res[off] = moved;
+ }
+
+ /* Inverse of move_to_front: rotate res[off..elem] left by one slot,
+ * putting res[off] back at res[elem]. No-op when elem == off. */
+ static void restore_to_pos(struct op_desc res[], unsigned off, unsigned elem)
+ {
+ struct op_desc moved;
+ unsigned i;
+
+ if (elem == off)
+ return;
+
+ moved = res[off];
+ for (i = off; i < elem; i++)
+ res[i] = res[i+1];
+ res[elem] = moved;
+ }
+
+ /* Recursive backtracking: permute res[off..num-1] (all users of one
+ * key) so each op's precondition on the key is met by the value its
+ * predecessors leave behind. @data is the key's value entering
+ * position @off; seqnums may only go backwards by at most @fuzz.
+ * Returns true (with res[] permuted in place) on success. */
+ static bool sort_deps(char *filename[], struct op *op[],
+ struct op_desc res[],
+ unsigned off, unsigned num,
+ const TDB_DATA *key, const TDB_DATA *data,
+ unsigned num_files, unsigned fuzz)
+ {
+ unsigned int i, files_done;
+ struct op *this_op;
+ bool done[num_files];
+
+ /* None left? We're sorted. */
+ if (off == num)
+ return true;
+
+ /* Does this make sequence number go backwards? Allow a little fuzz. */
+ if (off > 0) {
+ int seqnum1 = op[res[off-1].file][res[off-1].op_num].seqnum;
+ int seqnum2 = op[res[off].file][res[off].op_num].seqnum;
+
+ if (seqnum1 - seqnum2 > (int)fuzz) {
+#if DEBUG_DEPS
+ printf("Seqnum jump too far (%u -> %u)\n",
+ seqnum1, seqnum2);
+#endif
+ return false;
+ }
+ }
+
+ memset(done, 0, sizeof(done));
+
+ /* Since ops within a trace file are ordered, we just need to figure
+ * out which file to try next. Since we don't take into account
+ * inter-key relationships (which exist by virtue of trace file order),
+ * we minimize the chance of harm by trying to keep in seqnum order. */
+ for (files_done = 0, i = off; i < num && files_done < num_files; i++) {
+ /* Only the earliest untried op per file is a candidate. */
+ if (done[res[i].file])
+ continue;
+
+ this_op = &op[res[i].file][res[i].op_num];
+
+ /* Is what we have good enough for this op? */
+ if (satisfies(key, data, this_op)) {
+ /* Try it next: recurse with the value it gives. */
+ move_to_front(res, off, i);
+ if (sort_deps(filename, op, res, off+1, num,
+ key, gives(key, data, this_op),
+ num_files, fuzz))
+ return true;
+ /* Dead end: undo and try another file. */
+ restore_to_pos(res, off, i);
+ }
+ done[res[i].file] = true;
+ files_done++;
+ }
+
+ /* No combination worked. */
+ return false;
+ }
+
+ /* Debug-only sanity check: after sorting, op_nums must be strictly
+ * increasing within each trace file (sorting never reorders ops from
+ * the same file). Compiled away unless DEBUG_DEPS. */
+ static void check_dep_sorting(struct op_desc user[], unsigned num_users,
+ unsigned num_files)
+ {
+#if DEBUG_DEPS
+ unsigned int i;
+ unsigned minima[num_files];
+
+ memset(minima, 0, sizeof(minima));
+ for (i = 0; i < num_users; i++) {
+ assert(minima[user[i].file] < user[i].op_num);
+ minima[user[i].file] = user[i].op_num;
+ }
+#endif
+ }
+
+ /* All these ops happen on the same key. Which comes first?
+ *
+ * This can happen both because read ops or failed write ops don't
+ * change sequence number, and also due to race since we access the
+ * number unlocked (the race can cause less detectable ordering problems,
+ * in which case we'll deadlock and report: fix manually in that case).
+ *
+ * @data is the key's assumed initial value (&tdb_null = absent).
+ * Returns false if no consistent order could be found. */
+ static bool figure_deps(char *filename[], struct op *op[],
+ const TDB_DATA *key, const TDB_DATA *data,
+ struct op_desc user[],
+ unsigned num_users, unsigned num_files)
+ {
+ unsigned int fuzz;
+
+ /* We prefer to keep strict seqnum order if possible: it's the
+ * most likely. We get more lax if that fails.
+ * fuzz takes values 0, 2, 6, 14, 30, 62; then 126 ends the loop. */
+ for (fuzz = 0; fuzz < 100; fuzz = (fuzz + 1)*2) {
+ if (sort_deps(filename, op, user, 0, num_users, key, data,
+ num_files, fuzz))
+ break;
+ }
+
+ if (fuzz >= 100)
+ return false;
+
+ check_dep_sorting(user, num_users, num_files);
+ return true;
+ }
+
+ /* We're having trouble sorting out dependencies for this key. Assume that it's
+ * a pre-existing record in the db, so determine a likely value.
+ *
+ * Fixed: the loop counted i but always read user->file / user->op_num
+ * (i.e. user[0]); it now actually scans every user of the key. */
+ static const TDB_DATA *preexisting_data(char *filename[], struct op *op[],
+ const TDB_DATA *key,
+ struct op_desc *user,
+ unsigned int num_users)
+ {
+ unsigned int i;
+ const TDB_DATA *data;
+
+ /* Return the first concrete requirement we find; existence
+ * markers other than must_not_exist also qualify. */
+ for (i = 0; i < num_users; i++) {
+ data = needs(key, &op[user[i].file][user[i].op_num]);
+ if (data && data != &must_not_exist) {
+ if (!quiet)
+ printf("%s:%u: needs pre-existing record\n",
+ filename[user[i].file],
+ user[i].op_num+1);
+ return data;
+ }
+ }
+ return &tdb_null;
+ }
+
+ /* For each key, order its users into a feasible execution order,
+ * starting from the assumption the key is absent (&tdb_null). If no
+ * order works, assume a pre-existing record: seed the tdb with a
+ * likely value and retry. */
+ static void sort_ops(struct tdb_context *tdb,
+ struct keyinfo hash[], char *filename[], struct op *op[],
+ unsigned int num)
+ {
+ unsigned int h;
+
+ /* Gcc nested function extension. How cool is this?
+ * (It captures op[] from the enclosing frame for qsort.) */
+ int compare_seqnum(const void *_a, const void *_b)
+ {
+ const struct op_desc *a = _a, *b = _b;
+
+ /* First, maintain order within any trace file. */
+ if (a->file == b->file)
+ return a->op_num - b->op_num;
+
+ /* Otherwise, arrange by seqnum order. */
+ if (op[a->file][a->op_num].seqnum !=
+ op[b->file][b->op_num].seqnum)
+ return op[a->file][a->op_num].seqnum
+ - op[b->file][b->op_num].seqnum;
+
+ /* Cancelled transactions are assumed to happen first. */
+ if (starts_transaction(&op[a->file][a->op_num])
+ && !successful_transaction(&op[a->file][a->op_num]))
+ return -1;
+ if (starts_transaction(&op[b->file][b->op_num])
+ && !successful_transaction(&op[b->file][b->op_num]))
+ return 1;
+
+ /* No idea. */
+ return 0;
+ }
+
+ /* Now sort into seqnum order. */
+ for (h = 0; h < total_keys * 2; h++) {
+ struct op_desc *user = hash[h].user;
+
+ qsort(user, hash[h].num_users, sizeof(user[0]), compare_seqnum);
+ if (!figure_deps(filename, op, &hash[h].key, &tdb_null, user,
+ hash[h].num_users, num)) {
+ const TDB_DATA *data;
+
+ data = preexisting_data(filename, op, &hash[h].key,
+ user, hash[h].num_users);
+ /* Give the first op what it wants: does that help? */
+ if (!figure_deps(filename, op, &hash[h].key, data, user,
+ hash[h].num_users, num))
+ fail(filename[user[0].file], user[0].op_num+1,
+ "Could not resolve inter-dependencies");
+ /* Seed the db so the assumed record really exists. */
+ if (tdb_store(tdb, hash[h].key, *data, TDB_INSERT) != 0)
+ errx(1, "Could not store initial value");
+ }
+ }
+ }
+
+ /* talloc destructor for struct depend: unlink it from both the
+ * prerequisite's post list and the dependent op's pre list. */
+ static int destroy_depend(struct depend *dep)
+ {
+ list_del(&dep->post_list);
+ list_del(&dep->pre_list);
+ return 0;
+ }
+
+ /* Record that op *needs must run after op *prereq (ops within one
+ * file are implicitly ordered, so same-file pairs are ignored).
+ * Allocates a struct depend on @ctx and links it into both ops'
+ * lists. Endpoints are widened for groups: depending on a
+ * transaction/chainlock really means depending on its end, and
+ * traverse interactions are pulled out to group boundaries. */
+ static void add_dependency(void *ctx,
+ struct op *op[],
+ char *filename[],
+ const struct op_desc *needs,
+ const struct op_desc *prereq)
+ {
+ struct depend *dep;
+
+ /* We don't depend on ourselves. */
+ if (needs->file == prereq->file) {
+ assert(prereq->op_num < needs->op_num);
+ return;
+ }
+
+#if DEBUG_DEPS
+ printf("%s:%u: depends on %s:%u\n",
+ filename[needs->file], needs->op_num+1,
+ filename[prereq->file], prereq->op_num+1);
+#endif
+
+ dep = talloc(ctx, struct depend);
+ dep->needs = *needs;
+ dep->prereq = *prereq;
+
+#if TRAVERSALS_TAKE_TRANSACTION_LOCK
+ /* If something in a traverse depends on something in another
+ * traverse/transaction, it creates a dependency between the
+ * two groups. */
+ if ((in_traverse(op[prereq->file], prereq->op_num)
+ && (starts_transaction(&op[needs->file][needs->op_num])
+ || starts_traverse(&op[needs->file][needs->op_num])))
+ || (in_traverse(op[needs->file], needs->op_num)
+ && (starts_transaction(&op[prereq->file][prereq->op_num])
+ || starts_traverse(&op[prereq->file][prereq->op_num])))) {
+ unsigned int start;
+
+ /* We are satisfied by end of group. */
+ start = op[prereq->file][prereq->op_num].group_start;
+ dep->prereq.op_num = start + op[prereq->file][start].group_len;
+ /* And we need that done by start of our group. */
+ dep->needs.op_num = op[needs->file][needs->op_num].group_start;
+ }
+
+ /* There is also this case:
+ * <traverse> <read foo> ...
+ * <transaction> ... </transaction> <create foo>
+ * Where if we start the traverse then wait, we could block
+ * the transaction and deadlock.
+ *
+ * We try to address this by ensuring that where seqnum indicates it's
+ * possible, we wait for <create foo> before *starting* traverse.
+ */
+ else if (in_traverse(op[needs->file], needs->op_num)) {
+ struct op *need = &op[needs->file][needs->op_num];
+ if (op[needs->file][need->group_start].seqnum >
+ op[prereq->file][prereq->op_num].seqnum) {
+ dep->needs.op_num = need->group_start;
+ }
+ }
+#endif
+
+ /* If you depend on a transaction or chainlock, you actually
+ * depend on it ending. */
+ if (starts_transaction(&op[prereq->file][dep->prereq.op_num])
+ || starts_chainlock(&op[prereq->file][dep->prereq.op_num])) {
+ dep->prereq.op_num
+ += op[dep->prereq.file][dep->prereq.op_num].group_len;
+#if DEBUG_DEPS
+ /* Fixed: dep->prereq is a struct member, not a pointer;
+ * the old "dep->prereq->file" failed to compile whenever
+ * DEBUG_DEPS was enabled. */
+ printf("-> Actually end of transaction %s:%u\n",
+ filename[dep->prereq.file], dep->prereq.op_num+1);
+#endif
+ } else
+ /* We should never create a dependency from middle of
+ * a transaction. */
+ assert(!in_transaction(op[prereq->file], dep->prereq.op_num)
+ || op[prereq->file][dep->prereq.op_num].type
+ == OP_TDB_TRANSACTION_COMMIT
+ || op[prereq->file][dep->prereq.op_num].type
+ == OP_TDB_TRANSACTION_CANCEL);
+
+ list_add(&op[dep->prereq.file][dep->prereq.op_num].post,
+ &dep->post_list);
+ list_add(&op[dep->needs.file][dep->needs.op_num].pre,
+ &dep->pre_list);
+ talloc_set_destructor(dep, destroy_depend);
+ }
+
+ /* True if this op leaves some value for the key — gives() returning
+ * non-NULL; NULL apparently means "no effect on the db". */
+ static bool changes_db(const TDB_DATA *key, const struct op *op)
+ {
+ const TDB_DATA *result = gives(key, NULL, op);
+
+ return result != NULL;
+ }
+
+ /* Called when user[i] changes the db: if the previous user is the last
+ * changer (prev == i-1), depend on it directly. Otherwise the ops in
+ * between are readers — depend on the *last* intervening op in each
+ * other file, since they may run in parallel with each other but must
+ * all finish before this change. prev may be -1 (no changer yet). */
+ static void depend_on_previous(struct op *op[],
+ char *filename[],
+ unsigned int num,
+ struct op_desc user[],
+ unsigned int i,
+ int prev)
+ {
+ bool deps[num];
+ int j;
+
+ if (i == 0)
+ return;
+
+ if (prev == i - 1) {
+ /* Just depend on previous. */
+ add_dependency(NULL, op, filename, &user[i], &user[prev]);
+ return;
+ }
+
+ /* We have to wait for the readers. Find last one in *each* file. */
+ memset(deps, 0, sizeof(deps));
+ deps[user[i].file] = true;
+ for (j = i - 1; j > prev; j--) {
+ if (!deps[user[j].file]) {
+ add_dependency(NULL, op, filename, &user[i], &user[j]);
+ deps[user[j].file] = true;
+ }
+ }
+ }
+
+ /* This is simple, but not complete. We don't take into account
+ * indirect dependencies. */
+ static void optimize_dependencies(struct op *op[], unsigned int num_ops[],
+ unsigned int num)
+ {
+ unsigned int i, j;
+
+ /* There can only be one real dependency on each file */
+ /* Pass 1: for each op, keep only the latest prereq from any given
+ * file; earlier prereqs in the same file are implied by its order.
+ * talloc_free fires destroy_depend, unlinking the dep. */
+ for (i = 0; i < num; i++) {
+ for (j = 1; j < num_ops[i]; j++) {
+ struct depend *dep, *next;
+ struct depend *prev[num];
+
+ memset(prev, 0, sizeof(prev));
+
+ list_for_each_safe(&op[i][j].pre, dep, next, pre_list) {
+ if (!prev[dep->prereq.file]) {
+ prev[dep->prereq.file] = dep;
+ continue;
+ }
+ if (prev[dep->prereq.file]->prereq.op_num
+ < dep->prereq.op_num) {
+ talloc_free(prev[dep->prereq.file]);
+ prev[dep->prereq.file] = dep;
+ } else
+ talloc_free(dep);
+ }
+ }
+ }
+
+ /* Pass 2: walk each file in op order, tracking per prereq-file the
+ * latest op_num already waited for; drop any dep on the same or an
+ * earlier prereq (an earlier op here already waited for it). */
+ for (i = 0; i < num; i++) {
+ int deps[num];
+
+ for (j = 0; j < num; j++)
+ deps[j] = -1;
+
+ for (j = 1; j < num_ops[i]; j++) {
+ struct depend *dep, *next;
+
+ list_for_each_safe(&op[i][j].pre, dep, next, pre_list) {
+ if (deps[dep->prereq.file]
+ >= (int)dep->prereq.op_num)
+ talloc_free(dep);
+ else
+ deps[dep->prereq.file]
+ = dep->prereq.op_num;
+ }
+ }
+ }
+ }
+
+#if TRAVERSALS_TAKE_TRANSACTION_LOCK
+ /* Force an order among the traversals, so they don't deadlock (as much) */
+ static void make_traverse_depends(char *filename[],
+ struct op *op[], unsigned int num_ops[],
+ unsigned int num)
+ {
+ unsigned int i, num_traversals = 0;
+ int j;
+ struct op_desc *desc;
+
+ /* Sort by which one runs first.
+ * (GCC nested function: captures op[] from the enclosing frame.) */
+ int compare_traverse_desc(const void *_a, const void *_b)
+ {
+ const struct op_desc *da = _a, *db = _b;
+ const struct op *a = &op[da->file][da->op_num],
+ *b = &op[db->file][db->op_num];
+
+ if (a->seqnum != b->seqnum)
+ return a->seqnum - b->seqnum;
+
+ /* If they have same seqnum, it means one didn't make any
+ * changes. Thus sort by end in that case. */
+ return a[a->group_len].seqnum - b[b->group_len].seqnum;
+ }
+
+ /* Start with a 1-element array; grown as traversals are found. */
+ desc = talloc_array(NULL, struct op_desc, 1);
+
+ /* Count them. */
+ for (i = 0; i < num; i++) {
+ for (j = 1; j < num_ops[i]; j++) {
+ /* Traverse start (ignore those in
+ * transactions; they're already covered by
+ * transaction dependencies). */
+ if (starts_traverse(&op[i][j])
+ && !in_transaction(op[i], j)) {
+ desc = talloc_realloc(NULL, desc,
+ struct op_desc,
+ num_traversals+1);
+ desc[num_traversals].file = i;
+ desc[num_traversals].op_num = j;
+ num_traversals++;
+ }
+ }
+ }
+ qsort(desc, num_traversals, sizeof(desc[0]), compare_traverse_desc);
+
+ /* Chain each traversal after the end of the one before it. */
+ for (i = 1; i < num_traversals; i++) {
+ const struct op *prev = &op[desc[i-1].file][desc[i-1].op_num];
+ const struct op *curr = &op[desc[i].file][desc[i].op_num];
+
+ /* Read traverses don't depend on each other (read lock). */
+ if (prev->type == OP_TDB_TRAVERSE_READ_START
+ && curr->type == OP_TDB_TRAVERSE_READ_START)
+ continue;
+
+ /* Only make dependency if it's clear. */
+ if (compare_traverse_desc(&desc[i], &desc[i-1])) {
+ /* i depends on end of traverse i-1. */
+ struct op_desc end = desc[i-1];
+ end.op_num += prev->group_len;
+ add_dependency(NULL, op, filename, &desc[i], &end);
+ }
+ }
+ talloc_free(desc);
+ }
+
+ /* Mark fd non-blocking, preserving its other file status flags.
+ * Fixed: if F_GETFL failed, the original OR'd its -1 error return
+ * straight into F_SETFL; now a F_GETFL failure also errors out. */
+ static void set_nonblock(int fd)
+ {
+ int flags = fcntl(fd, F_GETFL);
+
+ if (flags < 0 || fcntl(fd, F_SETFL, flags|O_NONBLOCK) != 0)
+ err(1, "Setting pipe nonblocking");
+ }
+
+ /* Drain backoff reports from the pipe: each is an op_desc naming the
+ * op at which a child abandoned a traverse to avoid deadlock. We
+ * rewrite that trace so the next run won't deadlock: either insert a
+ * fake early traverse end, or hoist the traverse start past the ops
+ * that ran inside it. Returns true if anything was handled.
+ * The fd is expected to be non-blocking (read() returns -1 when
+ * drained). */
+ static bool handle_backoff(struct op *op[], int fd)
+ {
+ struct op_desc desc;
+ bool handled = false;
+
+ /* Sloppy coding: we assume PIPEBUF never fills. */
+ while (read(fd, &desc, sizeof(desc)) != -1) {
+ unsigned int i;
+ handled = true;
+ /* Walk backwards from the failing op to the traverse. */
+ for (i = desc.op_num; i > 0; i--) {
+ if (op[desc.file][i].type == OP_TDB_TRAVERSE) {
+ /* We insert a fake end here. */
+ op[desc.file][i].type
+ = OP_TDB_TRAVERSE_END_EARLY;
+ break;
+ } else if (starts_traverse(&op[desc.file][i])) {
+ unsigned int start = i;
+ struct op tmp = op[desc.file][i];
+ /* Move the ops outside traverse. */
+ /* NOTE(review): the count moves only
+ * [i+1, desc.op_num-1], which appears to
+ * leave op[desc.op_num-1] duplicated and
+ * overwrite the op at desc.op_num with
+ * tmp — confirm whether (desc.op_num-i)
+ * was intended. */
+ memmove(&op[desc.file][i],
+ &op[desc.file][i+1],
+ (desc.op_num-i-1) * sizeof(op[0][0]));
+ op[desc.file][desc.op_num] = tmp;
+ /* Re-point moved ops at the relocated
+ * group start. */
+ while (op[desc.file][i].group_start == start) {
+ op[desc.file][i++].group_start
+ = desc.op_num;
+ }
+ break;
+ }
+ }
+ }
+ return handled;
+ }
+
+#else /* !TRAVERSALS_TAKE_TRANSACTION_LOCK */
+ /* Without the traverse/transaction-lock interaction there is never a
+ * backoff to handle: always report "nothing done". */
+ static bool handle_backoff(struct op *op[], int fd)
+ {
+ return false;
+ }
+#endif
+
+ /* Top level: hash ops by key, order each key's users, then create
+ * cross-file dependencies and prune the redundant ones. */
+ static void derive_dependencies(struct tdb_context *tdb,
+ char *filename[],
+ struct op *op[], unsigned int num_ops[],
+ unsigned int num)
+ {
+ struct keyinfo *hash;
+ unsigned int h, i;
+
+ /* Create hash table for faster key lookup. */
+ hash = hash_ops(op, num_ops, num);
+
+ /* Sort them by sequence number. */
+ sort_ops(tdb, hash, filename, op, num);
+
+ /* Create dependencies back to the last change, rather than
+ * creating false dependencies by naively making each one
+ * depend on the previous. This has two purposes: it makes
+ * later optimization simpler, and it also avoids deadlock with
+ * same sequence number ops inside traversals (if one
+ * traversal doesn't write anything, two ops can have the same
+ * sequence number yet we can create a traversal dependency
+ * the other way). */
+ for (h = 0; h < total_keys * 2; h++) {
+ int prev = -1;
+
+ /* A key with a single user needs no ordering. */
+ if (hash[h].num_users < 2)
+ continue;
+
+ for (i = 0; i < hash[h].num_users; i++) {
+ if (changes_db(&hash[h].key, &op[hash[h].user[i].file]
+ [hash[h].user[i].op_num])) {
+ /* Changers wait for the last changer and
+ * any intervening readers. */
+ depend_on_previous(op, filename, num,
+ hash[h].user, i, prev);
+ prev = i;
+ } else if (prev >= 0)
+ /* Readers just wait for the last changer. */
+ add_dependency(hash, op, filename,
+ &hash[h].user[i],
+ &hash[h].user[prev]);
+ }
+ }
+
+#if TRAVERSALS_TAKE_TRANSACTION_LOCK
+ make_traverse_depends(filename, op, num_ops, num);
+#endif
+
+ optimize_dependencies(op, num_ops, num);
+ }
+
+static struct timeval run_test(char *argv[],
+ unsigned int num_ops[],
+ unsigned int hashsize[],
+ unsigned int tdb_flags[],
+ unsigned int open_flags[],
+ struct op *op[],
+ int fds[2])
+{
+ unsigned int i;
+ struct timeval start, end, diff;
+ bool ok = true;
+
+ for (i = 0; argv[i+2]; i++) {
+ struct tdb_context *tdb;
+ char c;
+
+ switch (fork()) {
+ case -1:
+ err(1, "fork failed");
+ case 0:
+ close(fds[1]);
+ tdb = tdb_open(argv[1], hashsize[i],
+ tdb_flags[i], open_flags[i], 0600);
+ if (!tdb)
+ err(1, "Opening tdb %s", argv[1]);
+
+ /* This catches parent exiting. */
+ if (read(fds[0], &c, 1) != 1)
+ exit(1);
+ run_ops(tdb, pipes[i].fd[0], argv+2, op, i, 1,
+ num_ops[i], false);
+ check_deps(argv[2+i], op[i], num_ops[i]);
+ exit(0);
+ default:
+ break;
+ }
+ }
+
+ /* Let everything settle. */
+ sleep(1);
+
+ if (!quiet)
+ printf("Starting run...");
+ fflush(stdout);