2 #include <ccan/tally/tally.h>
3 #include <ccan/build_assert/build_assert.h>
4 #include <ccan/likely/likely.h>
/* The bit width of size_t (e.g. 64 on LP64).  A shift by this many bits
 * would be undefined behavior in C, so the bucket helpers below test for
 * this value explicitly before shifting. */
#define MAX_STEP_BITS (sizeof(size_t)*CHAR_BIT)

/* We use power of 2 steps. I tried being tricky, but it got buggy. */
/* This allows limited frequency analysis. */
	/* Per-bucket frequency counts.  Declared [1] rather than a C99
	 * flexible array member — presumably for pre-C99 compiler
	 * compatibility; tally_new() over-allocates so `buckets` entries
	 * actually follow (TODO confirm: rest of struct not visible here). */
	size_t counts[1 /* [buckets] */ ];
/*
 * tally_new - allocate a tally with @buckets histogram buckets.
 *
 * min/max are initialized to the extreme "impossible" values of the
 * matching integer width so the first tally_add() will replace both.
 * NOTE(review): several lines are elided in this view; the overflow and
 * allocation-failure paths presumably return NULL — confirm in full file.
 */
struct tally *tally_new(size_t buckets)
	/* Check for overflow. */
	if (buckets && SIZE_MAX / buckets < sizeof(tally->counts[0]))
	/* counts[1] is already inside sizeof(*tally); the extra
	 * `buckets` entries are appended here. */
	tally = malloc(sizeof(*tally) + sizeof(tally->counts[0])*buckets);
	/* SSIZE_MAX isn't portable, so make it one of these types. */
	BUILD_ASSERT(sizeof(tally->min) == sizeof(int)
		     || sizeof(tally->min) == sizeof(long)
		     || sizeof(tally->min) == sizeof(long long));
	/* Pick the limits matching ssize_t's actual width. */
	if (sizeof(tally->min) == sizeof(int)) {
	} else if (sizeof(tally->min) == sizeof(long)) {
		tally->min = LONG_MAX;
		tally->max = LONG_MIN;
	} else if (sizeof(tally->min) == sizeof(long long)) {
		tally->min = (ssize_t)LLONG_MAX;
		tally->max = (ssize_t)LLONG_MIN;
	/* Running sum kept in two halves (see the 128-bit handling in
	 * tally_add). */
	tally->total[0] = tally->total[1] = 0;
	/* There is always 1 bucket. */
	tally->buckets = buckets+1;
	memset(tally->counts, 0, sizeof(tally->counts[0])*(buckets+1));
/* Which bucket does @val fall into, given the tally's @min and a bucket
 * width of 2^@step_bits?  (The step_bits == MAX_STEP_BITS early-return
 * body is elided in this view.) */
static unsigned bucket_of(ssize_t min, unsigned step_bits, ssize_t val)
	/* Don't over-shift. */
	if (step_bits == MAX_STEP_BITS)
	assert(step_bits < MAX_STEP_BITS);
	/* Cast to size_t: the offset from min is non-negative, and an
	 * unsigned shift avoids signed-shift pitfalls. */
	return (size_t)(val - min) >> step_bits;
/* Return the min value in bucket b. */
static ssize_t bucket_min(ssize_t min, unsigned step_bits, unsigned b)
	/* Don't over-shift. */
	if (step_bits == MAX_STEP_BITS)
	assert(step_bits < MAX_STEP_BITS);
	/* Each bucket spans 2^step_bits consecutive values, starting at
	 * the tally's minimum. */
	return min + ((ssize_t)b << step_bits);
/* Does shifting by this many bits truncate the number? */
static bool shift_overflows(size_t num, unsigned bits)
	/* Two-stage shift: if (num << bits) lost the top bit, shifting
	 * right by 1 no longer matches shifting left by bits-1.
	 * NOTE(review): this expression needs bits >= 1; the bits == 0
	 * case is presumably handled on an elided line — confirm. */
	return ((num << bits) >> 1) != (num << (bits - 1));
/* When min or max change, we may need to shuffle the frequency counts. */
static void renormalize(struct tally *tally,
			ssize_t new_min, ssize_t new_max)
	unsigned int i, old_min;

	/* Uninitialized? Don't do anything... */
	if (tally->max < tally->min)

	/* If we don't have sufficient range, increase step bits until
	 * buckets cover entire range of ssize_t anyway. */
	range = (new_max - new_min) + 1;
	while (!shift_overflows(tally->buckets, tally->step_bits)
	       && range > ((size_t)tally->buckets << tally->step_bits)) {
		/* Widen buckets: fold each count into bucket i/2, halving
		 * the histogram's resolution (step_bits increment elided). */
		for (i = 1; i < tally->buckets; i++) {
			tally->counts[i/2] += tally->counts[i];
			tally->counts[i] = 0;

	/* Now if minimum has dropped, move buckets up. */
	old_min = bucket_of(new_min, tally->step_bits, tally->min);
	/* memmove, not memcpy: source and destination overlap.
	 * (The source argument line is elided in this view.) */
	memmove(tally->counts + old_min,
		sizeof(tally->counts[0]) * (tally->buckets - old_min));
	memset(tally->counts, 0, sizeof(tally->counts[0]) * old_min);

	/* If we moved boundaries, adjust buckets to that ratio. */
	/* spill: how far the old minimum sits inside its new bucket. */
	spill = (tally->min - new_min) % (1 << tally->step_bits);
	for (i = 0; i < tally->buckets-1; i++) {
		/* Push the spilled fraction of each count into the next
		 * bucket. */
		size_t adjust = (tally->counts[i] >> tally->step_bits) * spill;
		tally->counts[i] -= adjust;
		tally->counts[i+1] += adjust;

	tally->min = new_min;
	tally->max = new_max;
/*
 * tally_add - record @val in the tally.
 *
 * Widens min/max as needed (renormalizing the histogram first), folds
 * @val into the running total, and increments the matching bucket.
 */
void tally_add(struct tally *tally, ssize_t val)
	ssize_t new_min = tally->min, new_max = tally->max;
	bool need_renormalize = false;

	if (val < tally->min) {
		need_renormalize = true;
	if (val > tally->max) {
		need_renormalize = true;
	/* Renormalize BEFORE bucketing below, so bucket_of() uses the
	 * updated min/step_bits. */
	if (need_renormalize)
		renormalize(tally, new_min, new_max);

	/* 128-bit arithmetic! If we didn't want exact mean, we could just
	 * pull it out of counts. */
	/* Wraparound comparisons detect carry/borrow out of total[0];
	 * the total[1] adjustment bodies are elided in this view. */
	if (val > 0 && tally->total[0] + val < tally->total[0])
	else if (val < 0 && tally->total[0] + val > tally->total[0])
	tally->total[0] += val;
	tally->counts[bucket_of(tally->min, tally->step_bits, val)]++;
/* Total number of values added: the sum of every bucket count.
 * (Declarations and return are elided in this view.) */
size_t tally_num(const struct tally *tally)
	for (i = 0; i < tally->buckets; i++)
		num += tally->counts[i];
/* Accessor — presumably returns tally->min (body elided in this view). */
ssize_t tally_min(const struct tally *tally)
/* Accessor — presumably returns tally->max (body elided in this view). */
ssize_t tally_max(const struct tally *tally)
/* FIXME: Own ccan module please! */
/* Find last set bit: 1-based index of the highest set bit in @val,
 * or 0 when @val == 0. */
static unsigned fls64(uint64_t val)
#if HAVE_BUILTIN_CLZL
	if (val <= ULONG_MAX) {
		/* This is significantly faster! */
		/* __builtin_clzl is undefined for 0, hence the ternary. */
		return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
	/* Portable fallback: binary search from the top bits down.
	 * (The shift/accumulate lines between tests are elided here.) */
	if (!(val & 0xffffffff00000000ull)) {
	if (!(val & 0xffff000000000000ull)) {
	if (!(val & 0xff00000000000000ull)) {
	if (!(val & 0xf000000000000000ull)) {
	if (!(val & 0xc000000000000000ull)) {
	if (!(val & 0x8000000000000000ull)) {
#if HAVE_BUILTIN_CLZL
/* This is stolen straight from Hacker's Delight. */
/* Divide the 128-bit unsigned value u1:u0 by v, returning the 64-bit
 * quotient.  Schoolbook long division on 32-bit "digits"; returns
 * all-ones if the quotient cannot fit in 64 bits (u1 >= v).
 * NOTE(review): the `again:` label targeted by the goto below, and a
 * few other lines, are elided in this view. */
static uint64_t divlu64(uint64_t u1, uint64_t u0, uint64_t v)
	const uint64_t b = 4294967296ULL; // Number base (32 bits).
	uint32_t un[4], // Dividend and divisor
		vn[2]; // normalized and broken
			// up into halfwords.
	uint32_t q[2]; // Quotient as halfwords.
	uint64_t un1, un0, // Dividend and divisor
		vn0; // as fullwords.
	uint64_t qhat; // Estimated quotient digit.
	uint64_t rhat; // A remainder.
	uint64_t p; // Product of two digits.
	int64_t s, i, j, t, k;

	if (u1 >= v) // If overflow, return the largest
		return (uint64_t)-1; // possible quotient.

	s = 64 - fls64(v); // 0 <= s <= 63.
	vn0 = v << s; // Normalize divisor.
	vn[1] = vn0 >> 32; // Break divisor up into
	vn[0] = vn0 & 0xFFFFFFFF; // two 32-bit halves.

	// Shift dividend left.
	// The (-s >> 63) mask zeroes the whole term when s == 0,
	// because u0 >> 64 would be undefined behavior.
	un1 = ((u1 << s) | (u0 >> (64 - s))) & (-s >> 63);
	un[3] = un1 >> 32; // Break dividend up into
	un[2] = un1; // four 32-bit halfwords
	un[1] = un0 >> 32; // Note: storing into
	un[0] = un0; // halfwords truncates.

	for (j = 1; j >= 0; j--) {
		// Compute estimate qhat of q[j].
		qhat = (un[j+2]*b + un[j+1])/vn[1];
		rhat = (un[j+2]*b + un[j+1]) - qhat*vn[1];
		// Refine qhat while it is too large a digit estimate
		// (correction lines partly elided in this view).
		if (qhat >= b || qhat*vn[0] > b*rhat + un[j]) {
			if (rhat < b) goto again;

		// Multiply and subtract.
		for (i = 0; i < 2; i++) {
			t = un[i+j] - k - (p & 0xFFFFFFFF);
			k = (p >> 32) - (t >> 32);

		q[j] = qhat; // Store quotient digit.
		if (t < 0) { // If we subtracted too
			q[j] = q[j] - 1; // much, add back.
			for (i = 0; i < 2; i++) {
				t = un[i+j] + vn[i] + k;
			un[j+2] = un[j+2] + k;

	// Recombine the two quotient halfwords.
	return q[1]*b + q[0];
/* Signed counterpart of divlu64: divide the 128-bit value u1:u0 by v.
 * Strips signs, divides the magnitudes, then restores the quotient's
 * sign.  Saturates to INT64_MIN on overflow. */
static int64_t divls64(int64_t u1, uint64_t u0, int64_t v)
	int64_t q, uneg, vneg, diff, borrow;

	/* NOTE(review): `>> 63` on a signed value assumes arithmetic
	 * shift (implementation-defined, but universal in practice). */
	uneg = u1 >> 63; // -1 if u < 0.
	if (uneg) { // Compute the absolute
		u0 = -u0; // value of the dividend u.
	vneg = v >> 63; // -1 if v < 0.
	v = (v ^ vneg) - vneg; // Absolute value of v.

	/* Magnitude check; the overflow goto target body is below. */
	if ((uint64_t)u1 >= (uint64_t)v)

	q = divlu64(u1, u0, v);

	diff = uneg ^ vneg; // Negate q if signs of
	q = (q ^ diff) - diff; // u and v differed.

	if ((diff ^ q) < 0 && q != 0) { // If overflow, return the largest
	overflow: // possible neg. quotient.
		q = 0x8000000000000000ULL;
/* Exact mean of all added values, computed from the two-halved running
 * total.  (The count == 0 guard is presumably on an elided line.) */
ssize_t tally_mean(const struct tally *tally)
	size_t count = tally_num(tally);

	if (sizeof(tally->total[0]) == sizeof(uint32_t)) {
		/* Use standard 64-bit arithmetic. */
		int64_t total = tally->total[0]
			| (((uint64_t)tally->total[1]) << 32);
		return total / count;
	/* 64-bit halves: need full 128-by-64 signed division. */
	return divls64(tally->total[1], tally->total[0], count);
/* Midpoint of bucket @b; *err is set to half the bucket's width, the
 * maximum error of reporting the midpoint for any value in it. */
static ssize_t bucket_range(const struct tally *tally, unsigned b, size_t *err)
	min = bucket_min(tally->min, tally->step_bits, b);
	/* Last bucket is clamped (its branch body is elided here);
	 * otherwise the bucket ends just before the next one starts. */
	if (b == tally->buckets - 1)
		max = bucket_min(tally->min, tally->step_bits, b+1) - 1;

	/* FIXME: Think harder about cumulative error; is this enough?. */
	*err = (max - min + 1) / 2;
	/* Avoid overflow. */
	return min + (max - min) / 2;
/* Approximate median: the midpoint of the bucket containing the middle
 * sample, with *err set to that bucket's half-width. */
ssize_t tally_approx_median(const struct tally *tally, size_t *err)
	size_t count = tally_num(tally), total = 0;

	/* Walk buckets until the cumulative count passes half. */
	for (i = 0; i < tally->buckets; i++) {
		total += tally->counts[i];
		if (total * 2 >= count)
	return bucket_range(tally, i, err);
/* Approximate mode: midpoint of the most-populated bucket.  If several
 * buckets tie for the highest count, returns the middle of their span
 * and widens *err to cover it. */
ssize_t tally_approx_mode(const struct tally *tally, size_t *err)
	unsigned int i, min_best = 0, max_best = 0;

	/* Track the first (min_best) and last (max_best) bucket holding
	 * the current highest count (tie-update body elided). */
	for (i = 0; i < tally->buckets; i++) {
		if (tally->counts[i] > tally->counts[min_best]) {
			min_best = max_best = i;
		} else if (tally->counts[i] == tally->counts[min_best]) {

	/* We can have more than one best, making our error huge. */
	if (min_best != max_best) {
		min = bucket_range(tally, min_best, err);
		max = bucket_range(tally, max_best, err);
		/* Error grows by the whole distance between the tied
		 * buckets' midpoints. */
		*err += (size_t)(max - min);
		return min + (max - min) / 2;

	return bucket_range(tally, min_best, err);
/* Scan from the top for the highest non-empty bucket (return statements
 * are elided in this view). */
static unsigned get_max_bucket(const struct tally *tally)
	for (i = tally->buckets; i > 0; i--)
		if (tally->counts[i-1])
/*
 * tally_histogram - render the tally as a width x height ASCII chart.
 *
 * Builds a malloc'd string (presumably returned to the caller on an
 * elided line; caller would free it).  If more buckets are in use than
 * there are rows, a temporary tally is repeatedly renormalized until
 * the used buckets fit.
 */
char *tally_histogram(const struct tally *tally,
		      unsigned width, unsigned height)
	unsigned int i, count, max_bucket, largest_bucket;

	assert(width >= TALLY_MIN_HISTO_WIDTH);
	assert(height >= TALLY_MIN_HISTO_HEIGHT);

	/* Ignore unused buckets. */
	max_bucket = get_max_bucket(tally);

	/* FIXME: It'd be nice to smooth here... */
	if (height >= max_bucket) {
	/* We create a temporary then renormalize so < height. */
	/* FIXME: Antialias properly! */
	tmp = tally_new(tally->buckets-1);
	/* Clone the histogram state into the temporary. */
	tmp->min = tally->min;
	tmp->max = tally->max;
	tmp->step_bits = tally->step_bits;
	memcpy(tmp->counts, tally->counts,
	       sizeof(tally->counts[0]) * tmp->buckets);
	/* Doubling max halves the resolution on each pass. */
	while ((max_bucket = get_max_bucket(tmp)) >= height)
		renormalize(tmp, tmp->min, tmp->max *= 2);
	/* Restore the real max, which the loop above inflated. */
	tmp->max = tally->max;

	/* Figure out longest line, for scale. */
	for (i = 0; i < tally->buckets; i++) {
		if (tally->counts[i] > largest_bucket)
			largest_bucket = tally->counts[i];

	/* One row of `width` chars plus '\n' per line, plus NUL. */
	p = graph = malloc(height * (width + 1) + 1);

	for (i = 0; i < height; i++) {
		unsigned covered = 0;
		/* Scale this row's count to the chart width. */
		count = (double)tally->counts[i] / largest_bucket * width;
		/* First row is labelled with min, last with max (the
		 * first branch's condition line is elided here). */
			covered = snprintf(p, width, "%zi", tally->min);
		else if (i == height - 1)
			covered = snprintf(p, width, "%zi", tally->max);
		memset(p, '*', count);