diff --git a/ccan/tdb2/summary.c b/ccan/tdb2/summary.c
index b54b56e7a712239a74efa87de8a6bde0cce023dc..f3a3a085f38654032db922c284fb9833e34dcd5d 100644
--- a/ccan/tdb2/summary.c
+++ b/ccan/tdb2/summary.c
@@ -1,7 +1,7 @@
- /* 
+ /*
    Trivial Database 2: human-readable summary code
    Copyright (C) Rusty Russell 2010
-   
+
    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
 #include <assert.h>
 #include <ccan/tally/tally.h>
 
-static int count_hash(struct tdb_context *tdb,
-                     tdb_off_t hash_off, unsigned bits)
+#define SUMMARY_FORMAT \
+       "Size of file/data: %zu/%zu\n" \
+       "Number of records: %zu\n" \
+       "Smallest/average/largest keys: %zu/%zu/%zu\n%s" \
+       "Smallest/average/largest data: %zu/%zu/%zu\n%s" \
+       "Smallest/average/largest padding: %zu/%zu/%zu\n%s" \
+       "Number of free records: %zu\n" \
+       "Smallest/average/largest free records: %zu/%zu/%zu\n%s" \
+       "Number of uncoalesced records: %zu\n" \
+       "Smallest/average/largest uncoalesced runs: %zu/%zu/%zu\n%s" \
+       "Toplevel hash used: %u of %u\n" \
+       "Number of chains: %zu\n" \
+       "Number of subhashes: %zu\n" \
+       "Smallest/average/largest subhash entries: %zu/%zu/%zu\n%s" \
+       "Percentage keys/data/padding/free/rechdrs/freehdrs/hashes: %.0f/%.0f/%.0f/%.0f/%.0f/%.0f/%.0f\n"
+
+#define BUCKET_SUMMARY_FORMAT_A                                        \
+       "Free bucket %zu: total entries %zu.\n"                 \
+       "Smallest/average/largest length: %zu/%zu/%zu\n%s"
+#define BUCKET_SUMMARY_FORMAT_B                                        \
+       "Free bucket %zu-%zu: total entries %zu.\n"             \
+       "Smallest/average/largest length: %zu/%zu/%zu\n%s"
+#define CAPABILITY_FORMAT                                      \
+       "Capability %llu%s\n"
+
+#define HISTO_WIDTH 70
+#define HISTO_HEIGHT 20
+
+static tdb_off_t count_hash(struct tdb_context *tdb,
+                           tdb_off_t hash_off, unsigned bits)
 {
        const tdb_off_t *h;
-       unsigned int i, count = 0;
+       tdb_off_t count = 0;
+       unsigned int i;
 
        h = tdb_access_read(tdb, hash_off, sizeof(*h) << bits, true);
-       if (!h)
-               return -1;
+       if (TDB_PTR_IS_ERR(h)) {
+               return TDB_ERR_TO_OFF(TDB_PTR_ERR(h));
+       }
        for (i = 0; i < (1 << bits); i++)
                count += (h[i] != 0);
 
@@ -35,21 +65,23 @@ static int count_hash(struct tdb_context *tdb,
        return count;
 }
 
-static bool summarize(struct tdb_context *tdb,
-                     struct tally *hashes,
-                     struct tally *flists,
-                     struct tally *free,
-                     struct tally *keys,
-                     struct tally *data,
-                     struct tally *extra,
-                     struct tally *uncoal,
-                     struct tally *buckets)
+static enum TDB_ERROR summarize(struct tdb_context *tdb,
+                               struct tally *hashes,
+                               struct tally *ftables,
+                               struct tally *fr,
+                               struct tally *keys,
+                               struct tally *data,
+                               struct tally *extra,
+                               struct tally *uncoal,
+                               struct tally *chains)
 {
        tdb_off_t off;
        tdb_len_t len;
        tdb_len_t unc = 0;
 
-       for (off = sizeof(struct tdb_header); off < tdb->map_size; off += len) {
+       for (off = sizeof(struct tdb_header);
+            off < tdb->file->map_size;
+            off += len) {
                const union {
                        struct tdb_used_record u;
                        struct tdb_free_record f;
@@ -57,120 +89,182 @@ static bool summarize(struct tdb_context *tdb,
                } *p;
                /* We might not be able to get the whole thing. */
                p = tdb_access_read(tdb, off, sizeof(p->f), true);
-               if (!p)
-                       return false;
-               if (p->r.magic == TDB_RECOVERY_INVALID_MAGIC
-                   || p->r.magic == TDB_RECOVERY_MAGIC) {
-                       if (unc) {
+               if (TDB_PTR_IS_ERR(p)) {
+                       return TDB_PTR_ERR(p);
+               }
+               if (frec_magic(&p->f) != TDB_FREE_MAGIC) {
+                       if (unc > 1) {
                                tally_add(uncoal, unc);
                                unc = 0;
                        }
+               }
+
+               if (p->r.magic == TDB_RECOVERY_INVALID_MAGIC
+                   || p->r.magic == TDB_RECOVERY_MAGIC) {
                        len = sizeof(p->r) + p->r.max_len;
                } else if (frec_magic(&p->f) == TDB_FREE_MAGIC) {
                        len = frec_len(&p->f);
-                       tally_add(free, len);
-                       tally_add(buckets, size_to_bucket(len));
+                       tally_add(fr, len);
                        len += sizeof(p->u);
                        unc++;
-               } else if (rec_magic(&p->u) == TDB_MAGIC) {
-                       if (unc) {
-                               tally_add(uncoal, unc);
-                               unc = 0;
-                       }
+               } else if (rec_magic(&p->u) == TDB_USED_MAGIC) {
                        len = sizeof(p->u)
                                + rec_key_length(&p->u)
                                + rec_data_length(&p->u)
                                + rec_extra_padding(&p->u);
 
-                       /* FIXME: Use different magic for hashes, flists. */
-                       if (!rec_key_length(&p->u) && rec_hash(&p->u) < 2) {
-                               if (rec_hash(&p->u) == 0) {
-                                       int count = count_hash(tdb,
-                                                       off + sizeof(p->u),
-                                                       TDB_SUBLEVEL_HASH_BITS);
-                                       if (count == -1)
-                                               return false;
-                                       tally_add(hashes, count);
-                               } else {
-                                       tally_add(flists,
-                                                 rec_data_length(&p->u));
-                               }
-                       } else {
-                               tally_add(keys, rec_key_length(&p->u));
-                               tally_add(data, rec_data_length(&p->u));
+                       tally_add(keys, rec_key_length(&p->u));
+                       tally_add(data, rec_data_length(&p->u));
+                       tally_add(extra, rec_extra_padding(&p->u));
+               } else if (rec_magic(&p->u) == TDB_HTABLE_MAGIC) {
+                       tdb_off_t count = count_hash(tdb,
+                                                    off + sizeof(p->u),
+                                                    TDB_SUBLEVEL_HASH_BITS);
+                       if (TDB_OFF_IS_ERR(count)) {
+                               return TDB_OFF_TO_ERR(count);
                        }
+                       tally_add(hashes, count);
+                       tally_add(extra, rec_extra_padding(&p->u));
+                       len = sizeof(p->u)
+                               + rec_data_length(&p->u)
+                               + rec_extra_padding(&p->u);
+               } else if (rec_magic(&p->u) == TDB_FTABLE_MAGIC) {
+                       len = sizeof(p->u)
+                               + rec_data_length(&p->u)
+                               + rec_extra_padding(&p->u);
+                       tally_add(ftables, rec_data_length(&p->u));
+                       tally_add(extra, rec_extra_padding(&p->u));
+               } else if (rec_magic(&p->u) == TDB_CHAIN_MAGIC) {
+                       len = sizeof(p->u)
+                               + rec_data_length(&p->u)
+                               + rec_extra_padding(&p->u);
+                       tally_add(chains, 1);
                        tally_add(extra, rec_extra_padding(&p->u));
-               } else
+               } else {
                        len = dead_space(tdb, off);
+                       if (TDB_OFF_IS_ERR(len)) {
+                               return TDB_OFF_TO_ERR(len);
+                       }
+               }
                tdb_access_release(tdb, p);
        }
        if (unc)
                tally_add(uncoal, unc);
-       return true;
+       return TDB_SUCCESS;
 }
 
-#define SUMMARY_FORMAT \
-       "Size of file/data: %zu/%zu\n" \
-       "Number of records: %zu\n" \
-       "Smallest/average/largest keys: %zu/%zu/%zu\n%s" \
-       "Smallest/average/largest data: %zu/%zu/%zu\n%s" \
-       "Smallest/average/largest padding: %zu/%zu/%zu\n%s" \
-       "Number of free records: %zu\n" \
-       "Smallest/average/largest free records: %zu/%zu/%zu\n%s" \
-       "Number of uncoalesced records: %zu\n" \
-       "Smallest/average/largest uncoalesced runs: %zu/%zu/%zu\n%s" \
-       "Number of free lists: %zu\n%s" \
-       "Toplevel hash used: %u of %u\n" \
-       "Number of subhashes: %zu\n" \
-       "Smallest/average/largest subhash entries: %zu/%zu/%zu\n%s" \
-       "Percentage keys/data/padding/free/rechdrs/freehdrs/hashes: %.0f/%.0f/%.0f/%.0f/%.0f/%.0f/%.0f\n"
+static size_t num_capabilities(struct tdb_context *tdb)
+{
+       tdb_off_t off, next;
+       const struct tdb_capability *cap;
+       size_t count = 0;
 
-#define BUCKET_SUMMARY_FORMAT_A                                        \
-       "Free bucket %zu: total entries %zu.\n"                 \
-       "Smallest/average/largest length: %zu/%zu/%zu\n%s"
-#define BUCKET_SUMMARY_FORMAT_B                                        \
-       "Free bucket %zu-%zu: total entries %zu.\n"             \
-       "Smallest/average/largest length: %zu/%zu/%zu\n%s"
+       off = tdb_read_off(tdb, offsetof(struct tdb_header, capabilities));
+       if (TDB_OFF_IS_ERR(off))
+               return count;
 
-#define HISTO_WIDTH 70
-#define HISTO_HEIGHT 20
+       /* Count capability list. */
+       for (; off; off = next) {
+               cap = tdb_access_read(tdb, off, sizeof(*cap), true);
+               if (TDB_PTR_IS_ERR(cap)) {
+                       break;
+               }
+               count++;
+               next = cap->next;
+               tdb_access_release(tdb, cap);
+       }
+       return count;
+}
+
+static void add_capabilities(struct tdb_context *tdb, size_t num, char *summary)
+{
+       tdb_off_t off, next;
+       const struct tdb_capability *cap;
+       size_t count = 0;
+
+       /* Append to summary. */
+       summary += strlen(summary);
 
-char *tdb_summary(struct tdb_context *tdb, enum tdb_summary_flags flags)
+       off = tdb_read_off(tdb, offsetof(struct tdb_header, capabilities));
+       if (TDB_OFF_IS_ERR(off))
+               return;
+
+       /* Walk capability list. */
+       for (; off; off = next) {
+               cap = tdb_access_read(tdb, off, sizeof(*cap), true);
+               if (TDB_PTR_IS_ERR(cap)) {
+                       break;
+               }
+               count++;
+               sprintf(summary, CAPABILITY_FORMAT,
+                       cap->type & TDB_CAP_TYPE_MASK,
+                       /* Noopen?  How did we get here? */
+                       (cap->type & TDB_CAP_NOOPEN) ? " (unopenable)"
+                       : ((cap->type & TDB_CAP_NOWRITE)
+                          && (cap->type & TDB_CAP_NOCHECK)) ? " (uncheckable,read-only)"
+                       : (cap->type & TDB_CAP_NOWRITE) ? " (read-only)"
+                       : (cap->type & TDB_CAP_NOCHECK) ? " (uncheckable)"
+                       : "");
+               summary += strlen(summary);
+               next = cap->next;
+               tdb_access_release(tdb, cap);
+       }
+}
+
+enum TDB_ERROR tdb_summary(struct tdb_context *tdb,
+                          enum tdb_summary_flags flags,
+                          char **summary)
 {
        tdb_len_t len;
-       struct tally *flists, *hashes, *freet, *keys, *data, *extra, *uncoal,
-               *buckets;
-       char *hashesg, *freeg, *keysg, *datag, *extrag, *uncoalg, *bucketsg;
-       char *ret = NULL;
+       size_t num_caps;
+       struct tally *ftables, *hashes, *freet, *keys, *data, *extra, *uncoal,
+               *chains;
+       char *hashesg, *freeg, *keysg, *datag, *extrag, *uncoalg;
+       enum TDB_ERROR ecode;
 
-       hashesg = freeg = keysg = datag = extrag = uncoalg = bucketsg = NULL;
+       if (tdb->flags & TDB_VERSION1) {
+               /* tdb1 doesn't do graphs. */
+               *summary = tdb1_summary(tdb);
+               if (!*summary)
+                       return tdb->last_error;
+               return TDB_SUCCESS;
+       }
 
-       if (tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false) != 0)
-               return NULL;
+       hashesg = freeg = keysg = datag = extrag = uncoalg = NULL;
 
-       if (tdb_lock_expand(tdb, F_RDLCK) != 0) {
+       ecode = tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
+       if (ecode != TDB_SUCCESS) {
+               return tdb->last_error = ecode;
+       }
+
+       ecode = tdb_lock_expand(tdb, F_RDLCK);
+       if (ecode != TDB_SUCCESS) {
                tdb_allrecord_unlock(tdb, F_RDLCK);
-               return NULL;
+               return tdb->last_error = ecode;
        }
 
        /* Start stats off empty. */
-       flists = tally_new(HISTO_HEIGHT);
+       ftables = tally_new(HISTO_HEIGHT);
        hashes = tally_new(HISTO_HEIGHT);
        freet = tally_new(HISTO_HEIGHT);
        keys = tally_new(HISTO_HEIGHT);
        data = tally_new(HISTO_HEIGHT);
        extra = tally_new(HISTO_HEIGHT);
        uncoal = tally_new(HISTO_HEIGHT);
-       buckets = tally_new(HISTO_HEIGHT);
-       if (!flists || !hashes || !freet || !keys || !data || !extra
-           || !uncoal || !buckets) {
-               tdb->ecode = TDB_ERR_OOM;
+       chains = tally_new(HISTO_HEIGHT);
+       if (!ftables || !hashes || !freet || !keys || !data || !extra
+           || !uncoal || !chains) {
+               ecode = tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
+                                  "tdb_summary: failed to allocate"
+                                  " tally structures");
                goto unlock;
        }
 
-       if (!summarize(tdb, hashes, flists, freet, keys, data, extra, uncoal,
-                      buckets))
+       ecode = summarize(tdb, hashes, ftables, freet, keys, data, extra,
+                         uncoal, chains);
+       if (ecode != TDB_SUCCESS) {
                goto unlock;
+       }
 
        if (flags & TDB_SUMMARY_HISTOGRAMS) {
                hashesg = tally_histogram(hashes, HISTO_WIDTH, HISTO_HEIGHT);
@@ -179,9 +273,10 @@ char *tdb_summary(struct tdb_context *tdb, enum tdb_summary_flags flags)
                datag = tally_histogram(data, HISTO_WIDTH, HISTO_HEIGHT);
                extrag = tally_histogram(extra, HISTO_WIDTH, HISTO_HEIGHT);
                uncoalg = tally_histogram(uncoal, HISTO_WIDTH, HISTO_HEIGHT);
-               bucketsg = tally_histogram(buckets, HISTO_WIDTH, HISTO_HEIGHT);
        }
 
+       num_caps = num_capabilities(tdb);
+
        /* 20 is max length of a %llu. */
        len = strlen(SUMMARY_FORMAT) + 33*20 + 1
                + (hashesg ? strlen(hashesg) : 0)
@@ -190,48 +285,54 @@ char *tdb_summary(struct tdb_context *tdb, enum tdb_summary_flags flags)
                + (datag ? strlen(datag) : 0)
                + (extrag ? strlen(extrag) : 0)
                + (uncoalg ? strlen(uncoalg) : 0)
-               + (bucketsg ? strlen(bucketsg) : 0);
+               + num_caps * (strlen(CAPABILITY_FORMAT) + 20*4);
 
-       ret = malloc(len);
-       if (!ret)
+       *summary = malloc(len);
+       if (!*summary) {
+               ecode = tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
+                                  "tdb_summary: failed to allocate string");
                goto unlock;
+       }
+
+       sprintf(*summary, SUMMARY_FORMAT,
+               (size_t)tdb->file->map_size,
+               tally_total(keys, NULL) + tally_total(data, NULL),
+               tally_num(keys),
+               tally_min(keys), tally_mean(keys), tally_max(keys),
+               keysg ? keysg : "",
+               tally_min(data), tally_mean(data), tally_max(data),
+               datag ? datag : "",
+               tally_min(extra), tally_mean(extra), tally_max(extra),
+               extrag ? extrag : "",
+               tally_num(freet),
+               tally_min(freet), tally_mean(freet), tally_max(freet),
+               freeg ? freeg : "",
+               tally_total(uncoal, NULL),
+               tally_min(uncoal), tally_mean(uncoal), tally_max(uncoal),
+               uncoalg ? uncoalg : "",
+               (unsigned)count_hash(tdb, offsetof(struct tdb_header,
+                                                  hashtable),
+                                    TDB_TOPLEVEL_HASH_BITS),
+               1 << TDB_TOPLEVEL_HASH_BITS,
+               tally_num(chains),
+               tally_num(hashes),
+               tally_min(hashes), tally_mean(hashes), tally_max(hashes),
+               hashesg ? hashesg : "",
+               tally_total(keys, NULL) * 100.0 / tdb->file->map_size,
+               tally_total(data, NULL) * 100.0 / tdb->file->map_size,
+               tally_total(extra, NULL) * 100.0 / tdb->file->map_size,
+               tally_total(freet, NULL) * 100.0 / tdb->file->map_size,
+               (tally_num(keys) + tally_num(freet) + tally_num(hashes))
+               * sizeof(struct tdb_used_record) * 100.0 / tdb->file->map_size,
+               tally_num(ftables) * sizeof(struct tdb_freetable)
+               * 100.0 / tdb->file->map_size,
+               (tally_num(hashes)
+                * (sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS)
+                + (sizeof(tdb_off_t) << TDB_TOPLEVEL_HASH_BITS)
+                + sizeof(struct tdb_chain) * tally_num(chains))
+               * 100.0 / tdb->file->map_size);
 
-       len = sprintf(ret, SUMMARY_FORMAT,
-                     (size_t)tdb->map_size,
-                     tally_num(keys) + tally_num(data),
-                     tally_num(keys),
-                     tally_min(keys), tally_mean(keys), tally_max(keys),
-                     keysg ? keysg : "",
-                     tally_min(data), tally_mean(data), tally_max(data),
-                     datag ? datag : "",
-                     tally_min(extra), tally_mean(extra), tally_max(extra),
-                     extrag ? extrag : "",
-                     tally_num(freet),
-                     tally_min(freet), tally_mean(freet), tally_max(freet),
-                     freeg ? freeg : "",
-                     tally_total(uncoal, NULL),
-                     tally_min(uncoal), tally_mean(uncoal), tally_max(uncoal),
-                     uncoalg ? uncoalg : "",
-                     tally_num(buckets),
-                     bucketsg ? bucketsg : "",
-                     count_hash(tdb, offsetof(struct tdb_header, hashtable),
-                                TDB_TOPLEVEL_HASH_BITS),
-                     1 << TDB_TOPLEVEL_HASH_BITS,
-                     tally_num(hashes),
-                     tally_min(hashes), tally_mean(hashes), tally_max(hashes),
-                     hashesg ? hashesg : "",
-                     tally_total(keys, NULL) * 100.0 / tdb->map_size,
-                     tally_total(data, NULL) * 100.0 / tdb->map_size,
-                     tally_total(extra, NULL) * 100.0 / tdb->map_size,
-                     tally_total(freet, NULL) * 100.0 / tdb->map_size,
-                     (tally_num(keys) + tally_num(freet) + tally_num(hashes))
-                     * sizeof(struct tdb_used_record) * 100.0 / tdb->map_size,
-                     tally_num(flists) * sizeof(struct tdb_freelist)
-                     * 100.0 / tdb->map_size,
-                     (tally_num(hashes)
-                      * (sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS)
-                      + (sizeof(tdb_off_t) << TDB_TOPLEVEL_HASH_BITS))
-                     * 100.0 / tdb->map_size);
+       add_capabilities(tdb, num_caps, *summary);
 
 unlock:
        free(hashesg);
@@ -240,16 +341,16 @@ unlock:
        free(datag);
        free(extrag);
        free(uncoalg);
-       free(bucketsg);
        free(hashes);
-       free(buckets);
        free(freet);
        free(keys);
        free(data);
        free(extra);
        free(uncoal);
+       free(ftables);
+       free(chains);
 
        tdb_allrecord_unlock(tdb, F_RDLCK);
        tdb_unlock_expand(tdb, F_RDLCK);
-       return ret;
+       return tdb->last_error = ecode;
 }
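
For reference, a minimal caller sketch against the new error-returning interface above. The tdb_open() arguments and flags used here are assumptions about the surrounding tdb2 API, not part of this diff; only the tdb_summary() signature, TDB_SUMMARY_HISTOGRAMS, and the caller-frees contract for *summary are taken from the change itself.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <ccan/tdb2/tdb2.h>

int main(void)
{
	/* Assumed tdb2 open call: name, tdb flags, open(2) flags, mode, attributes. */
	struct tdb_context *tdb = tdb_open("example.tdb", TDB_DEFAULT,
					   O_RDWR, 0600, NULL);
	char *summary;
	enum TDB_ERROR ecode;

	if (!tdb)
		return 1;

	/* New interface: the error code is the return value; the summary string
	 * comes back via the out-parameter and (being malloc'd, per the diff)
	 * is freed by the caller. */
	ecode = tdb_summary(tdb, TDB_SUMMARY_HISTOGRAMS, &summary);
	if (ecode == TDB_SUCCESS) {
		fputs(summary, stdout);
		free(summary);
	}

	tdb_close(tdb);
	return ecode == TDB_SUCCESS ? 0 : 1;
}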