struct tdb_data k, d;
int64_t count = 0;
+ k.dptr = NULL;
for (ret = first_in_hash(tdb, ltype, &tinfo, &k, &d.dsize);
ret == 1;
ret = next_in_hash(tdb, ltype, &tinfo, &k, &d.dsize)) {
d.dptr = k.dptr + k.dsize;
count++;
- if (fn && fn(tdb, k, d, p))
+ if (fn && fn(tdb, k, d, p)) {
+ free(k.dptr);
break;
+ }
+ free(k.dptr);
}
if (ret < 0)
tdb->read_only = was_ro;
return ret;
}
+
+/* Return the first key in the database.
+ * On success (first_in_hash returns 1) the key is returned directly;
+ * presumably key.dptr is freshly allocated and owned by the caller
+ * (the traverse loop above free()s each k.dptr) — TODO confirm against
+ * first_in_hash's contract.
+ * If the database is empty (return 0), ecode is reset to TDB_SUCCESS so
+ * callers can tell "no keys" apart from a real error; any other return
+ * (error) leaves ecode as set by first_in_hash.  Both non-success paths
+ * yield tdb_null. */
+TDB_DATA tdb_firstkey(struct tdb_context *tdb)
+{
+	struct traverse_info tinfo;
+	struct tdb_data k;
+	switch (first_in_hash(tdb, F_RDLCK, &tinfo, &k, NULL)) {
+	case 1:
+		return k;
+	case 0:
+		tdb->ecode = TDB_SUCCESS;
+		/* Fall thru... */
+	default:
+		return tdb_null;
+	}
+}
+
+/* We lock twice, not very efficient. We could keep last key & tinfo cached. */
+/* Return the key that follows `key` in traversal order, or tdb_null when
+ * there is no successor (ecode set to TDB_SUCCESS) or on error.
+ * Implementation: re-locate `key` under a read lock to recover its hash
+ * position (tinfo.prev), drop the hash lock, then step to the next entry.
+ * NOTE(review): the record is unlocked before next_in_hash runs, so a
+ * concurrent writer could move/delete entries in that window — presumably
+ * next_in_hash tolerates this; verify against its contract. */
+TDB_DATA tdb_nextkey(struct tdb_context *tdb, TDB_DATA key)
+{
+	struct traverse_info tinfo;
+	struct hash_info h;
+	struct tdb_used_record rec;
+
+	/* Find the record for `key`, taking the hash-chain read lock and
+	 * filling tinfo with its traversal position. */
+	tinfo.prev = find_and_lock(tdb, key, F_RDLCK, &h, &rec, &tinfo);
+	if (unlikely(tinfo.prev == TDB_OFF_ERR))
+		return tdb_null;
+	/* Release the hash lock taken by find_and_lock; next_in_hash does
+	 * its own locking (hence the "lock twice" comment above). */
+	tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_RDLCK);
+
+	switch (next_in_hash(tdb, F_RDLCK, &tinfo, &key, NULL)) {
+	case 1:
+		return key;
+	case 0:
+		tdb->ecode = TDB_SUCCESS;
+		/* Fall thru... */
+	default:
+		return tdb_null;
+	}
+}