+ return &must_exist;
+ }
+ /* No flags? Don't care */
+ return NULL;
+
+ case OP_TDB_EXISTS:
+ if (op->ret == 1)
+ return &must_exist;
+ else
+ return &must_not_exist;
+
+ case OP_TDB_PARSE_RECORD:
+ if (op->ret < 0)
+ return &must_not_exist;
+ return &must_exist;
+
+ /* FIXME: handle these. */
+ case OP_TDB_WIPE_ALL:
+ case OP_TDB_FIRSTKEY:
+ case OP_TDB_NEXTKEY:
+ case OP_TDB_GET_SEQNUM:
+ case OP_TDB_TRAVERSE:
+ case OP_TDB_TRANSACTION_COMMIT:
+ case OP_TDB_TRANSACTION_CANCEL:
+ case OP_TDB_TRANSACTION_START:
+ return NULL;
+
+ case OP_TDB_FETCH:
+ if (!op->data.dptr)
+ return &must_not_exist;
+ return &op->data;
+
+ case OP_TDB_DELETE:
+ if (op->ret < 0)
+ return &must_not_exist;
+ return &must_exist;
+
+ default:
+ errx(1, "Unexpected op type %i", op->type);
+ }
+
+}
+
+ /* True if this op opens a transaction group. */
+ static bool starts_transaction(const struct op *op)
+ {
+ return op->type == OP_TDB_TRANSACTION_START;
+ }
+
+ /* True if op[i] lies inside a transaction group: group_start points back
+  * at the op which opened the enclosing group (0 means "no group"). */
+ static bool in_transaction(const struct op op[], unsigned int i)
+ {
+ return op[i].group_start && starts_transaction(&op[op[i].group_start]);
+ }
+
+ /* True if op starts a transaction which ends in a commit (op[group_len]
+  * is the op that closes the group). */
+ static bool successful_transaction(const struct op *op)
+ {
+ return starts_transaction(op)
+ && op[op->group_len].type == OP_TDB_TRANSACTION_COMMIT;
+ }
+
+ /* True if this op opens a traverse group (read-only or read-write). */
+ static bool starts_traverse(const struct op *op)
+ {
+ return op->type == OP_TDB_TRAVERSE_START
+ || op->type == OP_TDB_TRAVERSE_READ_START;
+ }
+
+ /* True if op[i] lies inside a traverse group (see in_transaction). */
+ static bool in_traverse(const struct op op[], unsigned int i)
+ {
+ return op[i].group_start && starts_traverse(&op[op[i].group_start]);
+ }
+
+ /* True if this op opens a chainlock group (read or read-write lock). */
+ static bool starts_chainlock(const struct op *op)
+ {
+ return op->type == OP_TDB_CHAINLOCK_READ
+ || op->type == OP_TDB_CHAINLOCK;
+ }
+
+ /* True if op[i] lies inside a chainlock group (see in_transaction). */
+ static bool in_chainlock(const struct op op[], unsigned int i)
+ {
+ return op[i].group_start && starts_chainlock(&op[op[i].group_start]);
+ }
+
+ /* What's the data after this op? pre if nothing changed.
+  *
+  * For a transaction/chainlock group, replays every op in the group that
+  * touches this key; a cancelled transaction leaves the data untouched.
+  * For single ops, only a successful delete/wipe/append/store changes it. */
+ static const TDB_DATA *gives(const TDB_DATA *key, const TDB_DATA *pre,
+ const struct op *op)
+ {
+ if (starts_transaction(op) || starts_chainlock(op)) {
+ unsigned int i;
+
+ /* Cancelled transactions don't change anything. */
+ if (op[op->group_len].type == OP_TDB_TRANSACTION_CANCEL)
+ return pre;
+ /* Otherwise the group must end in a commit or an unlock. */
+ assert(op[op->group_len].type == OP_TDB_TRANSACTION_COMMIT
+ || op[op->group_len].type == OP_TDB_CHAINUNLOCK_READ
+ || op[op->group_len].type == OP_TDB_CHAINUNLOCK);
+
+ /* Accumulate the effect of each op on this key, in order. */
+ for (i = 1; i < op->group_len; i++) {
+ /* This skips nested transactions, too */
+ if (key_eq(op[i].key, *key))
+ pre = gives(key, pre, &op[i]);
+ }
+ return pre;
+ }
+
+ /* Failed ops don't change state of db. */
+ if (op->ret < 0)
+ return pre;
+
+ if (op->type == OP_TDB_DELETE || op->type == OP_TDB_WIPE_ALL)
+ return &tdb_null;
+
+ if (op->type == OP_TDB_APPEND)
+ return &op->append.post;
+
+ if (op->type == OP_TDB_STORE)
+ return &op->data;
+
+ return pre;
+ }
+
+ /* Build an open-addressed hash table over every key used by any op in any
+  * trace file.  Each bucket records the (file, op_num) descriptors of the
+  * ops which touch that key; ops inside a transaction or chainlock group are
+  * recorded as the group-starting op, since the group is the unit of
+  * dependency analysis.  Duplicate key payloads are freed and shared. */
+ static struct keyinfo *hash_ops(struct op *op[], unsigned int num_ops[],
+ unsigned int num)
+ {
+ unsigned int i, j, h;
+ struct keyinfo *hash;
+
+ /* 2x the key count keeps the linear-probe chains short. */
+ hash = talloc_zero_array(op[0], struct keyinfo, total_keys*2);
+ for (i = 0; i < num; i++) {
+ for (j = 1; j < num_ops[i]; j++) {
+ /* We can't do this on allocation, due to realloc. */
+ list_head_init(&op[i][j].post);
+ list_head_init(&op[i][j].pre);
+
+ /* Keyless ops (eg. traverse starts) aren't hashed. */
+ if (!op[i][j].key.dptr)
+ continue;
+
+ /* Linear probe until we find this key or a free slot. */
+ h = hash_key(&op[i][j].key) % (total_keys * 2);
+ while (!key_eq(hash[h].key, op[i][j].key)) {
+ if (!hash[h].key.dptr) {
+ hash[h].key = op[i][j].key;
+ break;
+ }
+ h = (h + 1) % (total_keys * 2);
+ }
+ /* Might as well save some memory if we can. */
+ if (op[i][j].key.dptr != hash[h].key.dptr) {
+ talloc_free(op[i][j].key.dptr);
+ op[i][j].key.dptr = hash[h].key.dptr;
+ }
+ /* NOTE(review): if the "don't include twice" continue
+ * below fires, this realloc has grown the array by one
+ * unused slot — harmless, just slightly over-allocated. */
+ hash[h].user = talloc_realloc(hash, hash[h].user,
+ struct op_desc,
+ hash[h].num_users+1);
+
+ /* If it's in a transaction, it's the transaction which
+ * matters from an analysis POV. */
+ if (in_transaction(op[i], j)
+ || in_chainlock(op[i], j)) {
+ unsigned start = op[i][j].group_start;
+
+ /* Don't include twice. */
+ if (hash[h].num_users
+ && hash[h].user[hash[h].num_users-1].file
+ == i
+ && hash[h].user[hash[h].num_users-1].op_num
+ == start)
+ continue;
+
+ hash[h].user[hash[h].num_users].op_num = start;
+ } else
+ hash[h].user[hash[h].num_users].op_num = j;
+ hash[h].user[hash[h].num_users].file = i;
+ hash[h].num_users++;
+ }
+ }
+
+ return hash;
+ }
+
+ /* Would the database holding `data` for `key` satisfy this op's
+  * precondition (as reported by needs())?  For a transaction/chainlock
+  * group, the first in-group op on this key determines the requirement. */
+ static bool satisfies(const TDB_DATA *key, const TDB_DATA *data,
+ const struct op *op)
+ {
+ const TDB_DATA *need = NULL;
+
+ if (starts_transaction(op) || starts_chainlock(op)) {
+ unsigned int i;
+
+ /* Look through for an op in this transaction which
+ * needs this key. */
+ for (i = 1; i < op->group_len; i++) {
+ if (key_eq(op[i].key, *key)) {
+ need = needs(&op[i]);
+ /* tdb_exists() is special: there might be
+ * something in the transaction with more
+ * specific requirements. Other ops don't have
+ * specific requirements (eg. store or delete),
+ * but they change the value so we can't get
+ * more information from future ops. */
+ if (op[i].type != OP_TDB_EXISTS)
+ break;
+ }
+ }
+ } else
+ need = needs(op);
+
+ /* Don't need anything? Cool. */
+ if (!need)
+ return true;
+
+ /* This should be tdb_null or a real value. */
+ assert(data != &must_exist);
+ assert(data != &must_not_exist);
+ assert(data != &not_exists_or_empty);
+
+ /* Must not exist? data must not exist. */
+ if (need == &must_not_exist)
+ return data == &tdb_null;
+
+ /* Must exist? */
+ if (need == &must_exist)
+ return data != &tdb_null;
+
+ /* Either noexist or empty. */
+ if (need == &not_exists_or_empty)
+ return data->dsize == 0;
+
+ /* Needs something specific. */
+ return key_eq(*data, *need);
+ }
+
+ /* Move res[elem] to position off, shifting res[off..elem-1] up by one
+  * slot (their relative order is preserved).  Undone by restore_to_pos. */
+ static void move_to_front(struct op_desc res[], unsigned off, unsigned elem)
+ {
+ if (elem != off) {
+ struct op_desc tmp = res[elem];
+ memmove(res + off + 1, res + off, (elem - off)*sizeof(res[0]));
+ res[off] = tmp;
+ }
+ }
+
+ /* Exact inverse of move_to_front: put res[off] back at position elem,
+  * shifting res[off+1..elem] down by one slot. */
+ static void restore_to_pos(struct op_desc res[], unsigned off, unsigned elem)
+ {
+ if (elem != off) {
+ struct op_desc tmp = res[off];
+ memmove(res + off, res + off + 1, (elem - off)*sizeof(res[0]));
+ res[elem] = tmp;
+ }
+ }
+
+ /* Backtracking search: order res[off..num-1] so that each op's
+  * precondition on `key` is met by the database state left by the ops
+  * placed before it.  `data` is the current value for key; `fuzz` is the
+  * maximum allowed backwards jump in sequence numbers.  Returns true if a
+  * valid ordering was found (res[] is then sorted in place). */
+ static bool sort_deps(char *filename[], struct op *op[],
+ struct op_desc res[],
+ unsigned off, unsigned num,
+ const TDB_DATA *key, const TDB_DATA *data,
+ unsigned num_files, unsigned fuzz)
+ {
+ unsigned int i, files_done;
+ struct op *this_op;
+ bool done[num_files];
+
+ /* None left? We're sorted. */
+ if (off == num)
+ return true;
+
+ /* Does this make sequence number go backwards? Allow a little fuzz. */
+ if (off > 0) {
+ int seqnum1 = op[res[off-1].file][res[off-1].op_num].seqnum;
+ int seqnum2 = op[res[off].file][res[off].op_num].seqnum;
+
+ if (seqnum1 - seqnum2 > (int)fuzz) {
+ #if DEBUG_DEPS
+ printf("Seqnum jump too far (%u -> %u)\n",
+ seqnum1, seqnum2);
+ #endif
+ return false;
+ }
+ }
+
+ memset(done, 0, sizeof(done));
+
+ /* Since ops within a trace file are ordered, we just need to figure
+ * out which file to try next. Since we don't take into account
+ * inter-key relationships (which exist by virtue of trace file order),
+ * we minimize the chance of harm by trying to keep in seqnum order. */
+ for (files_done = 0, i = off; i < num && files_done < num_files; i++) {
+ /* Only the first pending op of each file is a candidate. */
+ if (done[res[i].file])
+ continue;
+
+ this_op = &op[res[i].file][res[i].op_num];
+
+ /* Is what we have good enough for this op? */
+ if (satisfies(key, data, this_op)) {
+ move_to_front(res, off, i);
+ if (sort_deps(filename, op, res, off+1, num,
+ key, gives(key, data, this_op),
+ num_files, fuzz))
+ return true;
+ /* Recursion failed: backtrack and try another file. */
+ restore_to_pos(res, off, i);
+ }
+ done[res[i].file] = true;
+ files_done++;
+ }
+
+ /* No combination worked. */
+ return false;
+ }
+
+ /* Debug-only sanity check (body compiled only with DEBUG_DEPS): after
+  * sorting, op numbers must be strictly increasing within each file. */
+ static void check_dep_sorting(struct op_desc user[], unsigned num_users,
+ unsigned num_files)
+ {
+ #if DEBUG_DEPS
+ unsigned int i;
+ unsigned minima[num_files];
+
+ memset(minima, 0, sizeof(minima));
+ for (i = 0; i < num_users; i++) {
+ assert(minima[user[i].file] < user[i].op_num);
+ minima[user[i].file] = user[i].op_num;
+ }
+ #endif
+ }
+
+ /* All these ops happen on the same key. Which comes first?
+ *
+ * This can happen both because read ops or failed write ops don't
+ * change sequence number, and also due to race since we access the
+ * number unlocked (the race can cause less detectable ordering problems,
+ * in which case we'll deadlock and report: fix manually in that case).
+ */
+ static void figure_deps(char *filename[], struct op *op[],
+ const TDB_DATA *key, struct op_desc user[],
+ unsigned num_users, unsigned num_files)
+ {
+ /* We assume database starts empty. */
+ const struct TDB_DATA *data = &tdb_null;
+ unsigned int fuzz;
+
+ /* We prefer to keep strict seqnum order if possible: it's the
+ * most likely. We get more lax if that fails.
+ * (fuzz grows roughly geometrically: 0, 2, 6, 14, 30, 62.) */
+ for (fuzz = 0; fuzz < 100; fuzz = (fuzz + 1)*2) {
+ if (sort_deps(filename, op, user, 0, num_users, key, data,
+ num_files, fuzz))
+ break;
+ }
+
+ /* Even the laxest fuzz found no consistent ordering: give up. */
+ if (fuzz >= 100)
+ fail(filename[user[0].file], user[0].op_num+1,
+ "Could not resolve inter-dependencies");
+
+ check_dep_sorting(user, num_users, num_files);
+ }
+
+ /* For every key bucket: sort its users into a plausible global order
+  * (seqnum-based qsort first, then figure_deps resolves ties/races). */
+ static void sort_ops(struct keyinfo hash[], char *filename[], struct op *op[],
+ unsigned int num)
+ {
+ unsigned int h;
+
+ /* Gcc nested function extension. How cool is this?
+ * (Needed so the qsort comparator can capture `op`.) */
+ int compare_seqnum(const void *_a, const void *_b)
+ {
+ const struct op_desc *a = _a, *b = _b;
+
+ /* First, maintain order within any trace file. */
+ if (a->file == b->file)
+ return a->op_num - b->op_num;
+
+ /* Otherwise, arrange by seqnum order. */
+ if (op[a->file][a->op_num].seqnum !=
+ op[b->file][b->op_num].seqnum)
+ return op[a->file][a->op_num].seqnum
+ - op[b->file][b->op_num].seqnum;
+
+ /* Cancelled transactions are assumed to happen first. */
+ if (starts_transaction(&op[a->file][a->op_num])
+ && !successful_transaction(&op[a->file][a->op_num]))
+ return -1;
+ if (starts_transaction(&op[b->file][b->op_num])
+ && !successful_transaction(&op[b->file][b->op_num]))
+ return 1;
+
+ /* No idea. */
+ return 0;
+ }
+
+ /* Now sort into seqnum order. */
+ for (h = 0; h < total_keys * 2; h++) {
+ struct op_desc *user = hash[h].user;
+
+ qsort(user, hash[h].num_users, sizeof(user[0]), compare_seqnum);
+ figure_deps(filename, op, &hash[h].key, user, hash[h].num_users,
+ num);
+ }
+ }
+
+ /* talloc destructor: unlink the dependency from both its lists before
+  * the struct is freed.  Returns 0 to let talloc proceed with the free. */
+ static int destroy_depend(struct depend *dep)
+ {
+ list_del(&dep->pre_list);
+ list_del(&dep->post_list);
+ return 0;
+ }
+
+static void add_dependency(void *ctx,
+ struct op *op[],
+ char *filename[],
+ const struct op_desc *needs,
+ const struct op_desc *prereq)
+{
+ struct depend *dep;
+
+ /* We don't depend on ourselves. */
+ if (needs->file == prereq->file) {
+ assert(prereq->op_num < needs->op_num);
+ return;
+ }
+
+#if DEBUG_DEPS
+ printf("%s:%u: depends on %s:%u\n",
+ filename[needs->file], needs->op_num+1,
+ filename[prereq->file], prereq->op_num+1);
+#endif
+
+ dep = talloc(ctx, struct depend);
+ dep->needs = *needs;
+ dep->prereq = *prereq;
+
+#if TRAVERSALS_TAKE_TRANSACTION_LOCK
+ /* If something in a traverse depends on something in another
+ * traverse/transaction, it creates a dependency between the
+ * two groups. */
+ if ((in_traverse(op[prereq->file], prereq->op_num)
+ && (starts_transaction(&op[needs->file][needs->op_num])
+ || starts_traverse(&op[needs->file][needs->op_num])))
+ || (in_traverse(op[needs->file], needs->op_num)
+ && (starts_transaction(&op[prereq->file][prereq->op_num])
+ || starts_traverse(&op[prereq->file][prereq->op_num])))) {
+ unsigned int start;
+
+ /* We are satisfied by end of group. */
+ start = op[prereq->file][prereq->op_num].group_start;
+ dep->prereq.op_num = start + op[prereq->file][start].group_len;
+ /* And we need that done by start of our group. */
+ dep->needs.op_num = op[needs->file][needs->op_num].group_start;