X-Git-Url: http://git.ozlabs.org/?p=ccan;a=blobdiff_plain;f=ccan%2Ftally%2Ftally.c;h=29f055588015f5e38f02513c6dcce39b4657ff20;hp=a5eedcb951bfafc819701a740bcad64e68f162ac;hb=HEAD;hpb=b10913e2abf41d97a3a7b4a00cc0909986d947d5

diff --git a/ccan/tally/tally.c b/ccan/tally/tally.c
index a5eedcb9..5cc3352a 100644
--- a/ccan/tally/tally.c
+++ b/ccan/tally/tally.c
@@ -25,12 +25,15 @@ struct tally *tally_new(unsigned buckets)
 	struct tally *tally;
 
 	/* There is always 1 bucket. */
-	if (buckets == 0)
+	if (buckets == 0) {
 		buckets = 1;
+	}
 
 	/* Overly cautious check for overflow. */
-	if (sizeof(*tally) * buckets / sizeof(*tally) != buckets)
+	if (sizeof(*tally) * buckets / sizeof(*tally) != buckets) {
 		return NULL;
+	}
+
 	tally = (struct tally *)malloc(
 		sizeof(*tally) + sizeof(tally->counts[0])*(buckets-1));
 	if (tally == NULL) {
@@ -49,27 +52,33 @@ struct tally *tally_new(unsigned buckets)
 static unsigned bucket_of(ssize_t min, unsigned step_bits, ssize_t val)
 {
 	/* Don't over-shift. */
-	if (step_bits == SIZET_BITS)
+	if (step_bits == SIZET_BITS) {
 		return 0;
+	}
 	assert(step_bits < SIZET_BITS);
-	return (size_t)(val - min) >> step_bits;
+	return ((size_t)val - (size_t)min) >> step_bits;
 }
 
 /* Return the min value in bucket b. */
 static ssize_t bucket_min(ssize_t min, unsigned step_bits, unsigned b)
 {
 	/* Don't over-shift. */
-	if (step_bits == SIZET_BITS)
+	if (step_bits == SIZET_BITS) {
 		return min;
+	}
 	assert(step_bits < SIZET_BITS);
-	return min + ((ssize_t)b << step_bits);
+	return min + ((size_t)b << step_bits);
 }
 
 /* Does shifting by this many bits truncate the number? */
 static bool shift_overflows(size_t num, unsigned bits)
 {
-	if (bits == 0)
+	if (bits == 0) {
 		return false;
+	}
+	if (bits >= SIZET_BITS) {
+		return true;
+	}
 
 	return ((num << bits) >> 1) != (num << (bits - 1));
 }
@@ -82,12 +91,13 @@ static void renormalize(struct tally *tally,
 	unsigned int i, old_min;
 
 	/* Uninitialized? Don't do anything... */
-	if (tally->max < tally->min)
+	if (tally->max < tally->min) {
 		goto update;
+	}
 
 	/* If we don't have sufficient range, increase step bits until
 	 * buckets cover entire range of ssize_t anyway. */
-	range = (new_max - new_min) + 1;
+	range = ((size_t)new_max - (size_t)new_min) + 1;
 	while (!shift_overflows(tally->buckets, tally->step_bits)
 	       && range > ((size_t)tally->buckets << tally->step_bits)) {
 		/* Collapse down. */
@@ -106,11 +116,13 @@ static void renormalize(struct tally *tally,
 		memset(tally->counts, 0, sizeof(tally->counts[0]) * old_min);
 
 	/* If we moved boundaries, adjust buckets to that ratio. */
-	spill = (tally->min - new_min) % (1 << tally->step_bits);
-	for (i = 0; i < tally->buckets-1; i++) {
-		size_t adjust = (tally->counts[i] >> tally->step_bits) * spill;
-		tally->counts[i] -= adjust;
-		tally->counts[i+1] += adjust;
+	if (tally->step_bits < SIZET_BITS) {
+		spill = (tally->min - new_min) % ((size_t)1 << tally->step_bits);
+		for (i = 0; i < tally->buckets-1; i++) {
+			size_t adjust = (tally->counts[i] >> tally->step_bits) * spill;
+			tally->counts[i] -= adjust;
+			tally->counts[i+1] += adjust;
+		}
 	}
 
 update:
@@ -131,15 +143,17 @@ void tally_add(struct tally *tally, ssize_t val)
 		new_max = val;
 		need_renormalize = true;
 	}
-	if (need_renormalize)
+	if (need_renormalize) {
 		renormalize(tally, new_min, new_max);
+	}
 
 	/* 128-bit arithmetic! If we didn't want exact mean, we could just
 	 * pull it out of counts. */
-	if (val > 0 && tally->total[0] + val < tally->total[0])
+	if (val > 0 && tally->total[0] + val < tally->total[0]) {
 		tally->total[1]++;
-	else if (val < 0 && tally->total[0] + val > tally->total[0])
+	} else if (val < 0 && tally->total[0] + val > tally->total[0]) {
 		tally->total[1]--;
+	}
 	tally->total[0] += val;
 	tally->counts[bucket_of(tally->min, tally->step_bits, val)]++;
 }
@@ -147,8 +161,9 @@ void tally_add(struct tally *tally, ssize_t val)
 size_t tally_num(const struct tally *tally)
 {
 	size_t i, num = 0;
-	for (i = 0; i < tally->buckets; i++)
+	for (i = 0; i < tally->buckets; i++) {
 		num += tally->counts[i];
+	}
 	return num;
 }
 
@@ -173,8 +188,9 @@ static unsigned fls64(uint64_t val)
 #endif
 	uint64_t r = 64;
-	if (!val)
+	if (!val) {
 		return 0;
+	}
 	if (!(val & 0xffffffff00000000ull)) {
 		val <<= 32;
 		r -= 32;
 	}
@@ -208,46 +224,49 @@
 /* This is stolen straight from Hacker's Delight. */
 static uint64_t divlu64(uint64_t u1, uint64_t u0, uint64_t v)
 {
-	const uint64_t b = 4294967296ULL; // Number base (32 bits).
-	uint32_t un[4],		// Dividend and divisor
-		 vn[2];		// normalized and broken
-				// up into halfwords.
-	uint32_t q[2];		// Quotient as halfwords.
-	uint64_t un1, un0,	// Dividend and divisor
-		 vn0;		// as fullwords.
-	uint64_t qhat;		// Estimated quotient digit.
-	uint64_t rhat;		// A remainder.
-	uint64_t p;		// Product of two digits.
+	const uint64_t b = 4294967296ULL; /* Number base (32 bits). */
+	uint32_t un[4],		/* Dividend and divisor */
+		 vn[2];		/* normalized and broken */
+				/* up into halfwords. */
+	uint32_t q[2];		/* Quotient as halfwords. */
+	uint64_t un1, un0,	/* Dividend and divisor */
+		 vn0;		/* as fullwords. */
+	uint64_t qhat;		/* Estimated quotient digit. */
+	uint64_t rhat;		/* A remainder. */
+	uint64_t p;		/* Product of two digits. */
 	int64_t s, i, j, t, k;
 
-	if (u1 >= v)		// If overflow, return the largest
-		return (uint64_t)-1; // possible quotient.
+	if (u1 >= v) {		/* If overflow, return the largest */
+		return (uint64_t)-1; /* possible quotient. */
+	}
 
-	s = 64 - fls64(v);	// 0 <= s <= 63.
-	vn0 = v << s;		// Normalize divisor.
-	vn[1] = vn0 >> 32;	// Break divisor up into
-	vn[0] = vn0 & 0xFFFFFFFF; // two 32-bit halves.
+	s = 64 - fls64(v);	/* 0 <= s <= 63. */
+	vn0 = v << s;		/* Normalize divisor. */
+	vn[1] = vn0 >> 32;	/* Break divisor up into */
+	vn[0] = vn0 & 0xFFFFFFFF; /* two 32-bit halves. */
 
 	// Shift dividend left.
 	un1 = ((u1 << s) | (u0 >> (64 - s))) & (-s >> 63);
 	un0 = u0 << s;
-	un[3] = un1 >> 32;	// Break dividend up into
-	un[2] = un1;		// four 32-bit halfwords
-	un[1] = un0 >> 32;	// Note: storing into
-	un[0] = un0;		// halfwords truncates.
+	un[3] = un1 >> 32;	/* Break dividend up into */
+	un[2] = un1;		/* four 32-bit halfwords */
+	un[1] = un0 >> 32;	/* Note: storing into */
+	un[0] = un0;		/* halfwords truncates. */
 
 	for (j = 1; j >= 0; j--) {
-		// Compute estimate qhat of q[j].
+		/* Compute estimate qhat of q[j]. */
 		qhat = (un[j+2]*b + un[j+1])/vn[1];
 		rhat = (un[j+2]*b + un[j+1]) - qhat*vn[1];
 	again:
 		if (qhat >= b || qhat*vn[0] > b*rhat + un[j]) {
 			qhat = qhat - 1;
 			rhat = rhat + vn[1];
-			if (rhat < b) goto again;
+			if (rhat < b) {
+				goto again;
+			}
 		}
 
-		// Multiply and subtract.
+		/* Multiply and subtract. */
 		k = 0;
 		for (i = 0; i < 2; i++) {
 			p = qhat*vn[i];
@@ -258,9 +277,9 @@ static uint64_t divlu64(uint64_t u1, uint64_t u0, uint64_t v)
 		t = un[j+2] - k;
 		un[j+2] = t;
 
-		q[j] = qhat;		// Store quotient digit.
-		if (t < 0) {		// If we subtracted too
-			q[j] = q[j] - 1; // much, add back.
+		q[j] = qhat;		/* Store quotient digit. */
+		if (t < 0) {		/* If we subtracted too */
+			q[j] = q[j] - 1; /* much, add back. */
 			k = 0;
 			for (i = 0; i < 2; i++) {
 				t = un[i+j] + vn[i] + k;
@@ -269,7 +288,7 @@ static uint64_t divlu64(uint64_t u1, uint64_t u0, uint64_t v)
 			}
 			un[j+2] = un[j+2] + k;
 		}
-	} // End j.
+	} /* End j. */
 
 	return q[1]*b + q[0];
 }
@@ -278,26 +297,28 @@ static int64_t divls64(int64_t u1, uint64_t u0, int64_t v)
 {
 	int64_t q, uneg, vneg, diff, borrow;
 
-	uneg = u1 >> 63;	// -1 if u < 0.
-	if (uneg) {		// Compute the absolute
-		u0 = -u0;	// value of the dividend u.
+	uneg = u1 >> 63;	/* -1 if u < 0. */
+	if (uneg) {		/* Compute the absolute */
+		u0 = -u0;	/* value of the dividend u. */
 		borrow = (u0 != 0);
 		u1 = -u1 - borrow;
 	}
 
-	vneg = v >> 63;		// -1 if v < 0.
-	v = (v ^ vneg) - vneg;	// Absolute value of v.
+	vneg = v >> 63;		/* -1 if v < 0. */
+	v = (v ^ vneg) - vneg;	/* Absolute value of v. */
 
-	if ((uint64_t)u1 >= (uint64_t)v)
+	if ((uint64_t)u1 >= (uint64_t)v) {
 		goto overflow;
+	}
 
 	q = divlu64(u1, u0, v);
 
-	diff = uneg ^ vneg;	// Negate q if signs of
-	q = (q ^ diff) - diff;	// u and v differed.
+	diff = uneg ^ vneg;	/* Negate q if signs of */
+	q = (q ^ diff) - diff;	/* u and v differed. */
 
-	if ((diff ^ q) < 0 && q != 0) { // If overflow, return the largest
-overflow:			// possible neg. quotient.
+	if ((diff ^ q) < 0 && q != 0) { /* If overflow, return the
+					   largest */
+	overflow:			/* possible neg. quotient. */
 		q = 0x8000000000000000ULL;
 	}
 	return q;
@@ -306,8 +327,9 @@ static int64_t divls64(int64_t u1, uint64_t u0, int64_t v)
 ssize_t tally_mean(const struct tally *tally)
 {
 	size_t count = tally_num(tally);
-	if (!count)
+	if (!count) {
 		return 0;
+	}
 
 	if (sizeof(tally->total[0]) == sizeof(uint32_t)) {
 		/* Use standard 64-bit arithmetic. */
@@ -350,10 +372,11 @@ static ssize_t bucket_range(const struct tally *tally, unsigned b, size_t *err)
 	ssize_t min, max;
 
 	min = bucket_min(tally->min, tally->step_bits, b);
-	if (b == tally->buckets - 1)
+	if (b == tally->buckets - 1) {
 		max = tally->max;
-	else
+	} else {
 		max = bucket_min(tally->min, tally->step_bits, b+1) - 1;
+	}
 
 	/* FIXME: Think harder about cumulative error; is this enough?. */
 	*err = (max - min + 1) / 2;
@@ -368,8 +391,9 @@ ssize_t tally_approx_median(const struct tally *tally, size_t *err)
 
 	for (i = 0; i < tally->buckets; i++) {
 		total += tally->counts[i];
-		if (total * 2 >= count)
+		if (total * 2 >= count) {
 			break;
+		}
 	}
 	return bucket_range(tally, i, err);
 }
@@ -403,9 +427,11 @@ static unsigned get_max_bucket(const struct tally *tally)
 {
 	unsigned int i;
 
-	for (i = tally->buckets; i > 0; i--)
-		if (tally->counts[i-1])
+	for (i = tally->buckets; i > 0; i--) {
+		if (tally->counts[i-1]) {
 			break;
+		}
+	}
 	return i;
 }
 
@@ -430,15 +456,17 @@ char *tally_histogram(const struct tally *tally,
 		/* We create a temporary then renormalize so < height. */
 		/* FIXME: Antialias properly! */
 		tmp = tally_new(tally->buckets);
-		if (!tmp)
+		if (!tmp) {
 			return NULL;
+		}
 		tmp->min = tally->min;
 		tmp->max = tally->max;
 		tmp->step_bits = tally->step_bits;
 		memcpy(tmp->counts, tally->counts,
 		       sizeof(tally->counts[0]) * tmp->buckets);
-		while ((max_bucket = get_max_bucket(tmp)) >= height)
+		while ((max_bucket = get_max_bucket(tmp)) >= height) {
 			renormalize(tmp, tmp->min, tmp->max * 2);
+		}
 		/* Restore max */
 		tmp->max = tally->max;
 		tally = tmp;
@@ -448,8 +476,9 @@ char *tally_histogram(const struct tally *tally,
 	/* Figure out longest line, for scale. */
 	largest_bucket = 0;
 	for (i = 0; i < tally->buckets; i++) {
-		if (tally->counts[i] > largest_bucket)
+		if (tally->counts[i] > largest_bucket) {
 			largest_bucket = tally->counts[i];
+		}
 	}
 
 	p = graph = (char *)malloc(height * (width + 1) + 1);
@@ -465,25 +494,28 @@ char *tally_histogram(const struct tally *tally,
 		row = height - i - 1;
 		count = (double)tally->counts[row] / largest_bucket * (width-1)+1;
 
-		if (row == 0)
+		if (row == 0) {
 			covered = snprintf(p, width, "%zi", tally->min);
-		else if (row == height - 1)
+		} else if (row == height - 1) {
 			covered = snprintf(p, width, "%zi", tally->max);
-		else if (row == bucket_of(tally->min, tally->step_bits, 0))
+		} else if (row == bucket_of(tally->min, tally->step_bits, 0)) {
 			*p = '+';
-		else
+		} else {
 			*p = '|';
+		}
 
-		if (covered > width)
+		if (covered > width) {
 			covered = width;
+		}
 		p += covered;
 
-		if (count > covered)
+		if (count > covered) {
 			count -= covered;
-		else
+			memset(p, '*', count);
+		} else {
 			count = 0;
+		}
 
-		memset(p, '*', count);
 		p += count;
 		*p = '\n';
 		p++;
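For readers unfamiliar with this module, here is a minimal usage sketch of the public API exercised by the diff above (tally_new, tally_add, tally_num, tally_mean, tally_approx_median, tally_histogram). It assumes the usual CCAN layout, with declarations in ccan/tally/tally.h and the module compiled and linked into the program; the bucket count, sample values and histogram dimensions are arbitrary illustrative choices, not anything prescribed by the code above.

#include <stdio.h>
#include <stdlib.h>
#include <ccan/tally/tally.h>	/* assumed header path for this module */

int main(void)
{
	struct tally *t = tally_new(100);	/* 100 buckets: arbitrary */
	size_t err;
	char *graph;
	int i;

	if (!t)
		return 1;

	/* Feed in some sample values; tally_add() renormalizes as needed. */
	for (i = 0; i < 1000; i++)
		tally_add(t, i % 50);

	printf("samples: %zu\n", tally_num(t));
	printf("mean:    %zd\n", tally_mean(t));
	printf("median:  %zd (+/- %zu)\n", tally_approx_median(t, &err), err);

	/* ASCII histogram; width and height as in tally_histogram() above. */
	graph = tally_histogram(t, 50, 10);
	if (graph) {
		printf("%s", graph);
		free(graph);	/* the graph buffer comes from malloc() */
	}
	free(t);	/* tally_new() allocates with plain malloc(), per the diff */
	return 0;
}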