#include <assert.h>
#include <ccan/tally/tally.h>
+/* Main printf-style format for tdb_summary() output.  The %s slots are
+ * filled with optional histogram strings (empty "" when graphs were not
+ * requested). */
+#define SUMMARY_FORMAT \
+ "Size of file/data: %zu/%zu\n" \
+ "Number of records: %zu\n" \
+ "Smallest/average/largest keys: %zu/%zu/%zu\n%s" \
+ "Smallest/average/largest data: %zu/%zu/%zu\n%s" \
+ "Smallest/average/largest padding: %zu/%zu/%zu\n%s" \
+ "Number of free records: %zu\n" \
+ "Smallest/average/largest free records: %zu/%zu/%zu\n%s" \
+ "Number of uncoalesced records: %zu\n" \
+ "Smallest/average/largest uncoalesced runs: %zu/%zu/%zu\n%s" \
+ "Toplevel hash used: %u of %u\n" \
+ "Number of chains: %zu\n" \
+ "Number of subhashes: %zu\n" \
+ "Smallest/average/largest subhash entries: %zu/%zu/%zu\n%s" \
+ "Percentage keys/data/padding/free/rechdrs/freehdrs/hashes: %.0f/%.0f/%.0f/%.0f/%.0f/%.0f/%.0f\n"
+
+/* Per-free-bucket summary formats: A for a single length, B for a
+ * length range.  (Not referenced in this chunk — used elsewhere in the
+ * file, presumably; confirm against the full source.) */
+#define BUCKET_SUMMARY_FORMAT_A \
+ "Free bucket %zu: total entries %zu.\n" \
+ "Smallest/average/largest length: %zu/%zu/%zu\n%s"
+#define BUCKET_SUMMARY_FORMAT_B \
+ "Free bucket %zu-%zu: total entries %zu.\n" \
+ "Smallest/average/largest length: %zu/%zu/%zu\n%s"
+/* One line per capability record, appended by add_capabilities(). */
+#define CAPABILITY_FORMAT \
+ "Capability %llu%s\n"
+
+/* Dimensions passed to tally_histogram() for the optional graphs. */
+#define HISTO_WIDTH 70
+#define HISTO_HEIGHT 20
+
static tdb_off_t count_hash(struct tdb_context *tdb,
tdb_off_t hash_off, unsigned bits)
{
h = tdb_access_read(tdb, hash_off, sizeof(*h) << bits, true);
if (TDB_PTR_IS_ERR(h)) {
- return TDB_PTR_ERR(h);
+ return TDB_ERR_TO_OFF(TDB_PTR_ERR(h));
}
for (i = 0; i < (1 << bits); i++)
count += (h[i] != 0);
struct tally *data,
struct tally *extra,
struct tally *uncoal,
- struct tally *buckets,
struct tally *chains)
{
tdb_off_t off;
if (TDB_PTR_IS_ERR(p)) {
return TDB_PTR_ERR(p);
}
- if (p->r.magic == TDB_RECOVERY_INVALID_MAGIC
- || p->r.magic == TDB_RECOVERY_MAGIC) {
- if (unc) {
+ if (frec_magic(&p->f) != TDB_FREE_MAGIC) {
+ if (unc > 1) {
tally_add(uncoal, unc);
unc = 0;
}
+ }
+
+ if (p->r.magic == TDB_RECOVERY_INVALID_MAGIC
+ || p->r.magic == TDB_RECOVERY_MAGIC) {
len = sizeof(p->r) + p->r.max_len;
} else if (frec_magic(&p->f) == TDB_FREE_MAGIC) {
len = frec_len(&p->f);
tally_add(fr, len);
- tally_add(buckets, size_to_bucket(len));
len += sizeof(p->u);
unc++;
} else if (rec_magic(&p->u) == TDB_USED_MAGIC) {
- if (unc) {
- tally_add(uncoal, unc);
- unc = 0;
- }
len = sizeof(p->u)
+ rec_key_length(&p->u)
+ rec_data_length(&p->u)
off + sizeof(p->u),
TDB_SUBLEVEL_HASH_BITS);
if (TDB_OFF_IS_ERR(count)) {
- return count;
+ return TDB_OFF_TO_ERR(count);
}
tally_add(hashes, count);
tally_add(extra, rec_extra_padding(&p->u));
} else {
len = dead_space(tdb, off);
if (TDB_OFF_IS_ERR(len)) {
- return len;
+ return TDB_OFF_TO_ERR(len);
}
}
tdb_access_release(tdb, p);
return TDB_SUCCESS;
}
-#define SUMMARY_FORMAT \
- "Size of file/data: %zu/%zu\n" \
- "Number of records: %zu\n" \
- "Smallest/average/largest keys: %zu/%zu/%zu\n%s" \
- "Smallest/average/largest data: %zu/%zu/%zu\n%s" \
- "Smallest/average/largest padding: %zu/%zu/%zu\n%s" \
- "Number of free records: %zu\n" \
- "Smallest/average/largest free records: %zu/%zu/%zu\n%s" \
- "Number of uncoalesced records: %zu\n" \
- "Smallest/average/largest uncoalesced runs: %zu/%zu/%zu\n%s" \
- "Number of free lists: %zu\n%s" \
- "Toplevel hash used: %u of %u\n" \
- "Number of chains: %zu\n" \
- "Number of subhashes: %zu\n" \
- "Smallest/average/largest subhash entries: %zu/%zu/%zu\n%s" \
- "Percentage keys/data/padding/free/rechdrs/freehdrs/hashes: %.0f/%.0f/%.0f/%.0f/%.0f/%.0f/%.0f\n"
+/*
+ * num_capabilities - count entries in the tdb's on-disk capability list.
+ *
+ * Returns 0 if the list head offset cannot be read.  NOTE(review): a
+ * tdb_access_read() failure mid-walk breaks out of the loop, so the
+ * returned count may be partial rather than an error.
+ */
+static size_t num_capabilities(struct tdb_context *tdb)
+{
+	tdb_off_t off, next;
+	const struct tdb_capability *cap;
+	size_t count = 0;
-#define BUCKET_SUMMARY_FORMAT_A \
-	"Free bucket %zu: total entries %zu.\n" \
-	"Smallest/average/largest length: %zu/%zu/%zu\n%s"
-#define BUCKET_SUMMARY_FORMAT_B \
-	"Free bucket %zu-%zu: total entries %zu.\n" \
-	"Smallest/average/largest length: %zu/%zu/%zu\n%s"
+	off = tdb_read_off(tdb, offsetof(struct tdb_header, capabilities));
+	if (TDB_OFF_IS_ERR(off))
+		return count;
-#define HISTO_WIDTH 70
-#define HISTO_HEIGHT 20
+	/* Count capability list. */
+	for (; off; off = next) {
+		cap = tdb_access_read(tdb, off, sizeof(*cap), true);
+		if (TDB_PTR_IS_ERR(cap)) {
+			break;
+		}
+		count++;
+		/* Save the link before releasing the mapped record. */
+		next = cap->next;
+		tdb_access_release(tdb, cap);
+	}
+	return count;
+}
+
+/*
+ * add_capabilities - append one CAPABILITY_FORMAT line per capability
+ * record to the end of @summary.
+ *
+ * @num is the count from num_capabilities(); the caller uses it to size
+ * the buffer, and it is not referenced inside this function.  A
+ * tdb_access_read() failure mid-walk silently stops the output early.
+ * The sprintf() here is unbounded — safe only because the caller
+ * reserved num * (strlen(CAPABILITY_FORMAT) + 20*4) extra bytes.
+ */
+static void add_capabilities(struct tdb_context *tdb, size_t num, char *summary)
+{
+	tdb_off_t off, next;
+	const struct tdb_capability *cap;
+	size_t count = 0;
+
+	/* Append to summary. */
+	summary += strlen(summary);
+
+	off = tdb_read_off(tdb, offsetof(struct tdb_header, capabilities));
+	if (TDB_OFF_IS_ERR(off))
+		return;
+
+	/* Walk capability list. */
+	for (; off; off = next) {
+		cap = tdb_access_read(tdb, off, sizeof(*cap), true);
+		if (TDB_PTR_IS_ERR(cap)) {
+			break;
+		}
+		count++;
+		/* Describe how an unknown capability of this type degrades
+		 * our access to the database. */
+		sprintf(summary, CAPABILITY_FORMAT,
+			cap->type & TDB_CAP_TYPE_MASK,
+			/* Noopen?  How did we get here? */
+			(cap->type & TDB_CAP_NOOPEN) ? " (unopenable)"
+			: ((cap->type & TDB_CAP_NOWRITE)
+			   && (cap->type & TDB_CAP_NOCHECK)) ? " (uncheckable,read-only)"
+			: (cap->type & TDB_CAP_NOWRITE) ? " (read-only)"
+			: (cap->type & TDB_CAP_NOCHECK) ? " (uncheckable)"
+			: "");
+		summary += strlen(summary);
+		next = cap->next;
+		tdb_access_release(tdb, cap);
+	}
+}
enum TDB_ERROR tdb_summary(struct tdb_context *tdb,
enum tdb_summary_flags flags,
char **summary)
{
tdb_len_t len;
+ size_t num_caps;
struct tally *ftables, *hashes, *freet, *keys, *data, *extra, *uncoal,
- *buckets, *chains;
- char *hashesg, *freeg, *keysg, *datag, *extrag, *uncoalg, *bucketsg;
+ *chains;
+ char *hashesg, *freeg, *keysg, *datag, *extrag, *uncoalg;
enum TDB_ERROR ecode;
- hashesg = freeg = keysg = datag = extrag = uncoalg = bucketsg = NULL;
+ if (tdb->flags & TDB_VERSION1) {
+ /* tdb1 doesn't do graphs. */
+ *summary = tdb1_summary(tdb);
+ if (!*summary)
+ return tdb->last_error;
+ return TDB_SUCCESS;
+ }
+
+ hashesg = freeg = keysg = datag = extrag = uncoalg = NULL;
ecode = tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
if (ecode != TDB_SUCCESS) {
data = tally_new(HISTO_HEIGHT);
extra = tally_new(HISTO_HEIGHT);
uncoal = tally_new(HISTO_HEIGHT);
- buckets = tally_new(HISTO_HEIGHT);
chains = tally_new(HISTO_HEIGHT);
if (!ftables || !hashes || !freet || !keys || !data || !extra
- || !uncoal || !buckets || !chains) {
+ || !uncoal || !chains) {
ecode = tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
"tdb_summary: failed to allocate"
" tally structures");
}
ecode = summarize(tdb, hashes, ftables, freet, keys, data, extra,
- uncoal, buckets, chains);
+ uncoal, chains);
if (ecode != TDB_SUCCESS) {
goto unlock;
}
datag = tally_histogram(data, HISTO_WIDTH, HISTO_HEIGHT);
extrag = tally_histogram(extra, HISTO_WIDTH, HISTO_HEIGHT);
uncoalg = tally_histogram(uncoal, HISTO_WIDTH, HISTO_HEIGHT);
- bucketsg = tally_histogram(buckets, HISTO_WIDTH, HISTO_HEIGHT);
}
+ num_caps = num_capabilities(tdb);
+
/* 20 is max length of a %llu. */
len = strlen(SUMMARY_FORMAT) + 33*20 + 1
+ (hashesg ? strlen(hashesg) : 0)
+ (datag ? strlen(datag) : 0)
+ (extrag ? strlen(extrag) : 0)
+ (uncoalg ? strlen(uncoalg) : 0)
- + (bucketsg ? strlen(bucketsg) : 0);
+ + num_caps * (strlen(CAPABILITY_FORMAT) + 20*4);
*summary = malloc(len);
if (!*summary) {
sprintf(*summary, SUMMARY_FORMAT,
(size_t)tdb->file->map_size,
- tally_num(keys) + tally_num(data),
+ tally_total(keys, NULL) + tally_total(data, NULL),
tally_num(keys),
tally_min(keys), tally_mean(keys), tally_max(keys),
keysg ? keysg : "",
tally_total(uncoal, NULL),
tally_min(uncoal), tally_mean(uncoal), tally_max(uncoal),
uncoalg ? uncoalg : "",
- tally_num(buckets),
- bucketsg ? bucketsg : "",
(unsigned)count_hash(tdb, offsetof(struct tdb_header,
hashtable),
TDB_TOPLEVEL_HASH_BITS),
+ sizeof(struct tdb_chain) * tally_num(chains))
* 100.0 / tdb->file->map_size);
+ add_capabilities(tdb, num_caps, *summary);
+
unlock:
free(hashesg);
free(freeg);
free(datag);
free(extrag);
free(uncoalg);
- free(bucketsg);
free(hashes);
- free(buckets);
free(freet);
free(keys);
free(data);