/* ccan/tally/tally.c - running tally (min/max/total/histogram) of ssize_t
 * samples, with limited frequency analysis via power-of-2 buckets. */
#include "config.h"
#include <ccan/tally/tally.h>
#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>
#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
10
11 #define SIZET_BITS (sizeof(size_t)*CHAR_BIT)
12
/* We use power of 2 steps.  I tried being tricky, but it got buggy. */
struct tally {
	/* Smallest and largest values seen so far; max < min means empty. */
	ssize_t min, max;
	/* Exact 128-bit running sum: total[0] low word, total[1] high word. */
	size_t total[2];
	/* This allows limited frequency analysis. */
	unsigned buckets, step_bits;
	/* Histogram counts; allocated with [buckets] entries (pre-C99
	 * trailing-array idiom — the [1] is counted in sizeof(*tally)). */
	size_t counts[1 /* [buckets] */ ];
};
21
22 struct tally *tally_new(unsigned buckets)
23 {
24         struct tally *tally;
25
26         /* Check for overflow. */
27         if (buckets && SIZE_MAX / buckets < sizeof(tally->counts[0]))
28                 return NULL;
29         tally = malloc(sizeof(*tally) + sizeof(tally->counts[0])*buckets);
30         if (tally) {
31                 tally->max = ((size_t)1 << (SIZET_BITS - 1));
32                 tally->min = ~tally->max;
33                 tally->total[0] = tally->total[1] = 0;
34                 /* There is always 1 bucket. */
35                 tally->buckets = buckets+1;
36                 tally->step_bits = 0;
37                 memset(tally->counts, 0, sizeof(tally->counts[0])*(buckets+1));
38         }
39         return tally;
40 }
41
42 static unsigned bucket_of(ssize_t min, unsigned step_bits, ssize_t val)
43 {
44         /* Don't over-shift. */
45         if (step_bits == SIZET_BITS)
46                 return 0;
47         assert(step_bits < SIZET_BITS);
48         return (size_t)(val - min) >> step_bits;
49 }
50
51 /* Return the min value in bucket b. */
52 static ssize_t bucket_min(ssize_t min, unsigned step_bits, unsigned b)
53 {
54         /* Don't over-shift. */
55         if (step_bits == SIZET_BITS)
56                 return min;
57         assert(step_bits < SIZET_BITS);
58         return min + ((ssize_t)b << step_bits);
59 }
60
/* Does shifting by this many bits truncate the number? */
static bool shift_overflows(size_t num, unsigned bits)
{
	if (bits == 0)
		return false;

	/* Shifting by >= the type's width is undefined behaviour in C;
	 * any nonzero value is certainly truncated at that point. */
	if (bits >= sizeof(num) * CHAR_BIT)
		return num != 0;

	/* Compare the value shifted then halved against a one-smaller
	 * shift: they differ exactly when the top bit fell off. */
	return ((num << bits) >> 1) != (num << (bits - 1));
}
69
70 /* When min or max change, we may need to shuffle the frequency counts. */
71 static void renormalize(struct tally *tally,
72                         ssize_t new_min, ssize_t new_max)
73 {
74         size_t range, spill;
75         unsigned int i, old_min;
76
77         /* Uninitialized?  Don't do anything... */
78         if (tally->max < tally->min)
79                 goto update;
80
81         /* If we don't have sufficient range, increase step bits until
82          * buckets cover entire range of ssize_t anyway. */
83         range = (new_max - new_min) + 1;
84         while (!shift_overflows(tally->buckets, tally->step_bits)
85                && range > ((size_t)tally->buckets << tally->step_bits)) {
86                 /* Collapse down. */
87                 for (i = 1; i < tally->buckets; i++) {
88                         tally->counts[i/2] += tally->counts[i];
89                         tally->counts[i] = 0;
90                 }
91                 tally->step_bits++;
92         }
93
94         /* Now if minimum has dropped, move buckets up. */
95         old_min = bucket_of(new_min, tally->step_bits, tally->min);
96         memmove(tally->counts + old_min,
97                 tally->counts,
98                 sizeof(tally->counts[0]) * (tally->buckets - old_min));
99         memset(tally->counts, 0, sizeof(tally->counts[0]) * old_min);
100
101         /* If we moved boundaries, adjust buckets to that ratio. */
102         spill = (tally->min - new_min) % (1 << tally->step_bits);
103         for (i = 0; i < tally->buckets-1; i++) {
104                 size_t adjust = (tally->counts[i] >> tally->step_bits) * spill;
105                 tally->counts[i] -= adjust;
106                 tally->counts[i+1] += adjust;
107         }
108
109 update:
110         tally->min = new_min;
111         tally->max = new_max;
112 }
113
114 void tally_add(struct tally *tally, ssize_t val)
115 {
116         ssize_t new_min = tally->min, new_max = tally->max;
117         bool need_renormalize = false;
118
119         if (val < tally->min) {
120                 new_min = val;
121                 need_renormalize = true;
122         }
123         if (val > tally->max) {
124                 new_max = val;
125                 need_renormalize = true;
126         }
127         if (need_renormalize)
128                 renormalize(tally, new_min, new_max);
129
130         /* 128-bit arithmetic!  If we didn't want exact mean, we could just
131          * pull it out of counts. */
132         if (val > 0 && tally->total[0] + val < tally->total[0])
133                 tally->total[1]++;
134         else if (val < 0 && tally->total[0] + val > tally->total[0])
135                 tally->total[1]--;
136         tally->total[0] += val;
137         tally->counts[bucket_of(tally->min, tally->step_bits, val)]++;
138 }
139
140 size_t tally_num(const struct tally *tally)
141 {
142         size_t i, num = 0;
143         for (i = 0; i < tally->buckets; i++)
144                 num += tally->counts[i];
145         return num;
146 }
147
/* Smallest value passed to tally_add() so far. */
ssize_t tally_min(const struct tally *tally)
{
	return tally->min;
}
152
/* Largest value passed to tally_add() so far. */
ssize_t tally_max(const struct tally *tally)
{
	return tally->max;
}
157
/* FIXME: Own ccan module please! */
/* Find-last-set: position of the most significant 1 bit (1-based),
 * or 0 if val is zero. */
static unsigned fls64(uint64_t val)
{
	unsigned r;

#if HAVE_BUILTIN_CLZL
	/* This is significantly faster! */
	if (val && val <= ULONG_MAX)
		return sizeof(long) * CHAR_BIT - __builtin_clzl(val);
#endif
	if (!val)
		return 0;

	/* Binary search: shift the top set bit toward bit 63, subtracting
	 * the width of each empty upper region from the answer. */
	r = 64;
	if (!(val & 0xffffffff00000000ull)) { val <<= 32; r -= 32; }
	if (!(val & 0xffff000000000000ull)) { val <<= 16; r -= 16; }
	if (!(val & 0xff00000000000000ull)) { val <<= 8;  r -= 8; }
	if (!(val & 0xf000000000000000ull)) { val <<= 4;  r -= 4; }
	if (!(val & 0xc000000000000000ull)) { val <<= 2;  r -= 2; }
	if (!(val & 0x8000000000000000ull)) { val <<= 1;  r -= 1; }
	return r;
}
200
/* This is stolen straight from Hacker's Delight. */
/* Unsigned 128-by-64 divide: (u1:u0) / v.  Returns the 64-bit quotient,
 * or all-ones if it would not fit (u1 >= v). */
static uint64_t divlu64(uint64_t u1, uint64_t u0, uint64_t v)
{
	const uint64_t b = 4294967296ULL; // Number base (32 bits).
	uint32_t un[4],           // Dividend and divisor
		vn[2];            // normalized and broken
				  // up into halfwords.
	uint32_t q[2];            // Quotient as halfwords.
	uint64_t un1, un0,        // Dividend and divisor
		vn0;              // as fullwords.
	uint64_t qhat;            // Estimated quotient digit.
	uint64_t rhat;            // A remainder.
	uint64_t p;               // Product of two digits.
	int64_t s, i, j, t, k;

	if (u1 >= v)              // If overflow, return the largest
		return (uint64_t)-1; // possible quotient.

	s = 64 - fls64(v);                // 0 <= s <= 63.
	vn0 = v << s;             // Normalize divisor.
	vn[1] = vn0 >> 32;        // Break divisor up into
	vn[0] = vn0 & 0xFFFFFFFF; // two 32-bit halves.

	// Shift dividend left.  The original masked with (-s >> 63) to
	// neuter the s == 0 case, but evaluating u0 >> 64 is undefined
	// behaviour regardless of the mask; branch explicitly instead.
	un1 = s ? ((u1 << s) | (u0 >> (64 - s))) : u1;
	un0 = u0 << s;
	un[3] = un1 >> 32;        // Break dividend up into
	un[2] = un1;              // four 32-bit halfwords
	un[1] = un0 >> 32;        // Note: storing into
	un[0] = un0;              // halfwords truncates.

	for (j = 1; j >= 0; j--) {
		// Compute estimate qhat of q[j].
		qhat = (un[j+2]*b + un[j+1])/vn[1];
		rhat = (un[j+2]*b + un[j+1]) - qhat*vn[1];
	again:
		if (qhat >= b || qhat*vn[0] > b*rhat + un[j]) {
			qhat = qhat - 1;
			rhat = rhat + vn[1];
			if (rhat < b) goto again;
		}

		// Multiply and subtract.
		k = 0;
		for (i = 0; i < 2; i++) {
			p = qhat*vn[i];
			t = un[i+j] - k - (p & 0xFFFFFFFF);
			un[i+j] = t;
			k = (p >> 32) - (t >> 32);
		}
		t = un[j+2] - k;
		un[j+2] = t;

		q[j] = qhat;              // Store quotient digit.
		if (t < 0) {              // If we subtracted too
			q[j] = q[j] - 1;  // much, add back.
			k = 0;
			for (i = 0; i < 2; i++) {
				t = un[i+j] + vn[i] + k;
				un[i+j] = t;
				k = t >> 32;
			}
			un[j+2] = un[j+2] + k;
		}
	} // End j.

	return q[1]*b + q[0];
}
269
/* Signed 128-by-64 divide built on divlu64: (u1:u0) / v, where u1 is the
 * signed high word and u0 the unsigned low word.  Returns the most
 * negative int64_t on overflow. */
static int64_t divls64(int64_t u1, uint64_t u0, int64_t v)
{
	int64_t q, uneg, vneg, diff, borrow;

	uneg = u1 >> 63;          // -1 if u < 0.
	if (uneg) {               // Compute the absolute
		u0 = -u0;         // value of the dividend u.
		borrow = (u0 != 0);
		u1 = -u1 - borrow;
	}

	vneg = v >> 63;           // -1 if v < 0.
	v = (v ^ vneg) - vneg;    // Absolute value of v.

	// |u| / |v| must fit in 64 bits, else overflow.
	if ((uint64_t)u1 >= (uint64_t)v)
		goto overflow;

	q = divlu64(u1, u0, v);

	diff = uneg ^ vneg;       // Negate q if signs of
	q = (q ^ diff) - diff;    // u and v differed.

	// Sign of q must agree with diff (unless q == 0).
	if ((diff ^ q) < 0 && q != 0) {    // If overflow, return the largest
	overflow:                          // possible neg. quotient.
		q = 0x8000000000000000ULL;
	}
	return q;
}
298
/* Exact mean of all added values; 0 if none were added. */
ssize_t tally_mean(const struct tally *tally)
{
	size_t count = tally_num(tally);
	if (!count)
		return 0;

	/* Compile-time dispatch on size_t width. */
	if (sizeof(tally->total[0]) == sizeof(uint32_t)) {
		/* Use standard 64-bit arithmetic. */
		int64_t total = tally->total[0]
			| (((uint64_t)tally->total[1]) << 32);
		return total / count;
	}
	/* 64-bit size_t: need a full signed 128/64 divide. */
	return divls64(tally->total[1], tally->total[0], count);
}
313
314 ssize_t tally_total(const struct tally *tally, ssize_t *overflow)
315 {
316         if (overflow) {
317                 *overflow = tally->total[1];
318                 return tally->total[0];
319         }
320
321         /* If result is negative, make sure we can represent it. */
322         if (tally->total[1] & (1 << (SIZET_BITS-1))) {
323                 /* Must have only underflowed once, and must be able to
324                  * represent result at ssize_t. */
325                 if ((~tally->total[1])+1 != 0
326                     || (ssize_t)tally->total[0] >= 0) {
327                         /* Underflow, return minimum. */
328                         return (ssize_t)((size_t)1 << (SIZET_BITS - 1));
329                 }
330         } else {
331                 /* Result is positive, must not have overflowed, and must be
332                  * able to represent as ssize_t. */
333                 if (tally->total[1] || (ssize_t)tally->total[0] < 0) {
334                         /* Overflow.  Return maximum. */
335                         return (ssize_t)~((size_t)1 << (SIZET_BITS - 1));
336                 }
337         }
338         return tally->total[0];
339 }
340
341 static ssize_t bucket_range(const struct tally *tally, unsigned b, size_t *err)
342 {
343         ssize_t min, max;
344
345         min = bucket_min(tally->min, tally->step_bits, b);
346         if (b == tally->buckets - 1)
347                 max = tally->max;
348         else
349                 max = bucket_min(tally->min, tally->step_bits, b+1) - 1;
350
351         /* FIXME: Think harder about cumulative error; is this enough?. */
352         *err = (max - min + 1) / 2;
353         /* Avoid overflow. */
354         return min + (max - min) / 2;
355 }
356
357 ssize_t tally_approx_median(const struct tally *tally, size_t *err)
358 {
359         size_t count = tally_num(tally), total = 0;
360         unsigned int i;
361
362         for (i = 0; i < tally->buckets; i++) {
363                 total += tally->counts[i];
364                 if (total * 2 >= count)
365                         break;
366         }
367         return bucket_range(tally, i, err);
368 }
369
/* Approximate mode: midpoint of the fullest bucket; *err bounds the
 * inaccuracy (and balloons when several buckets tie for fullest). */
ssize_t tally_approx_mode(const struct tally *tally, size_t *err)
{
	/* First and last buckets holding the maximal count. */
	unsigned int i, min_best = 0, max_best = 0;

	for (i = 0; i < tally->buckets; i++) {
		if (tally->counts[i] > tally->counts[min_best]) {
			min_best = max_best = i;
		} else if (tally->counts[i] == tally->counts[min_best]) {
			max_best = i;
		}
	}

	/* We can have more than one best, making our error huge. */
	if (min_best != max_best) {
		ssize_t min, max;
		min = bucket_range(tally, min_best, err);
		/* *err is deliberately overwritten here: only the last
		 * bucket's half-width is kept before widening below. */
		max = bucket_range(tally, max_best, err);
		max += *err;
		*err += (size_t)(max - min);
		return min + (max - min) / 2;
	}

	return bucket_range(tally, min_best, err);
}
394
395 static unsigned get_max_bucket(const struct tally *tally)
396 {
397         unsigned int i;
398
399         for (i = tally->buckets; i > 0; i--)
400                 if (tally->counts[i-1])
401                         break;
402         return i;
403 }
404
/* Render an ASCII histogram: one row per bucket (at most 'height' rows),
 * each row at most 'width' characters plus a newline, with a Y axis
 * labelled min (top row), max (bottom row) and '+' at the zero bucket.
 * Returns a malloc'd NUL-terminated string, or NULL on allocation
 * failure.  Caller frees. */
char *tally_histogram(const struct tally *tally,
		      unsigned width, unsigned height)
{
	unsigned int i, count, max_bucket, largest_bucket;
	struct tally *tmp;
	char *graph, *p;

	assert(width >= TALLY_MIN_HISTO_WIDTH);
	assert(height >= TALLY_MIN_HISTO_HEIGHT);

	/* Ignore unused buckets. */
	max_bucket = get_max_bucket(tally);

	/* FIXME: It'd be nice to smooth here... */
	if (height >= max_bucket) {
		height = max_bucket;
		tmp = NULL;
	} else {
		/* We create a temporary then renormalize so < height. */
		/* FIXME: Antialias properly! */
		tmp = tally_new(tally->buckets-1);
		if (!tmp)
			return NULL;
		tmp->min = tally->min;
		tmp->max = tally->max;
		tmp->step_bits = tally->step_bits;
		memcpy(tmp->counts, tally->counts,
		       sizeof(tally->counts[0]) * tmp->buckets);
		/* Doubling max merges bucket pairs via renormalize,
		 * shrinking the number of buckets in use each pass. */
		while ((max_bucket = get_max_bucket(tmp)) >= height)
			renormalize(tmp, tmp->min, tmp->max * 2);
		/* Restore max */
		tmp->max = tally->max;
		tally = tmp;
		height = max_bucket;
	}

	/* Figure out longest line, for scale. */
	largest_bucket = 0;
	for (i = 0; i < tally->buckets; i++) {
		if (tally->counts[i] > largest_bucket)
			largest_bucket = tally->counts[i];
	}

	/* height rows of (width chars + '\n'), plus the final NUL. */
	p = graph = malloc(height * (width + 1) + 1);
	if (!graph) {
		free(tmp);
		return NULL;
	}

	for (i = 0; i < height; i++) {
		unsigned covered = 1;
		/* Bar length scaled against the fullest bucket; >= 1. */
		count = (double)tally->counts[i] / largest_bucket * (width-1)+1;

		if (i == 0)
			covered = snprintf(p, width, "%zi", tally->min);
		else if (i == height - 1)
			covered = snprintf(p, width, "%zi", tally->max);
		else if (i == bucket_of(tally->min, tally->step_bits, 0))
			*p = '+';
		else
			*p = '|';

		/* snprintf may report more than it could write. */
		if (covered > width)
			covered = width;
		p += covered;

		/* Axis label eats into the bar's length. */
		if (count > covered)
			count -= covered;
		else
			count = 0;

		memset(p, '*', count);
		p += count;
		*p = '\n';
		p++;
	}
	*p = '\0';
	free(tmp);
	return graph;
}