various: add LICENSE comments.
[ccan] / ccan / tally / tally.c
1 /* Licensed under LGPLv3+ - see LICENSE file for details */
2 #include <ccan/tally/tally.h>
3 #include <ccan/build_assert/build_assert.h>
4 #include <ccan/likely/likely.h>
5 #include <stdint.h>
6 #include <limits.h>
7 #include <string.h>
8 #include <stdio.h>
9 #include <assert.h>
10 #include <stdlib.h>
11
12 #define SIZET_BITS (sizeof(size_t)*CHAR_BIT)
13
/* We use power of 2 steps.  I tried being tricky, but it got buggy. */
struct tally {
        /* Smallest/largest value seen so far.  tally_new() initializes
         * max < min as an "empty" sentinel (see renormalize()). */
        ssize_t min, max;
        /* Two-word running total: total[0] is the low word, total[1] the
         * high word; tally_add() propagates carries/borrows by hand. */
        size_t total[2];
        /* This allows limited frequency analysis. */
        /* Each bucket spans (1 << step_bits) consecutive values. */
        unsigned buckets, step_bits;
        size_t counts[1 /* Actually: [buckets] */ ];
};
22
23 struct tally *tally_new(unsigned buckets)
24 {
25         struct tally *tally;
26
27         /* There is always 1 bucket. */
28         if (buckets == 0)
29                 buckets = 1;
30
31         /* Overly cautious check for overflow. */
32         if (sizeof(*tally) * buckets / sizeof(*tally) != buckets)
33                 return NULL;
34         tally = malloc(sizeof(*tally) + sizeof(tally->counts[0])*(buckets-1));
35         if (tally) {
36                 tally->max = ((size_t)1 << (SIZET_BITS - 1));
37                 tally->min = ~tally->max;
38                 tally->total[0] = tally->total[1] = 0;
39                 tally->buckets = buckets;
40                 tally->step_bits = 0;
41                 memset(tally->counts, 0, sizeof(tally->counts[0])*buckets);
42         }
43         return tally;
44 }
45
46 static unsigned bucket_of(ssize_t min, unsigned step_bits, ssize_t val)
47 {
48         /* Don't over-shift. */
49         if (step_bits == SIZET_BITS)
50                 return 0;
51         assert(step_bits < SIZET_BITS);
52         return (size_t)(val - min) >> step_bits;
53 }
54
55 /* Return the min value in bucket b. */
56 static ssize_t bucket_min(ssize_t min, unsigned step_bits, unsigned b)
57 {
58         /* Don't over-shift. */
59         if (step_bits == SIZET_BITS)
60                 return min;
61         assert(step_bits < SIZET_BITS);
62         return min + ((ssize_t)b << step_bits);
63 }
64
/* Does shifting num left by this many bits truncate it?
 *
 * Fix: shifting by >= the width of size_t is undefined behavior, and
 * renormalize() can call this with bits == SIZET_BITS (e.g. a
 * one-bucket tally keeps incrementing step_bits).  Answer that case
 * directly: any non-zero value is truncated by such a shift. */
static bool shift_overflows(size_t num, unsigned bits)
{
        if (bits == 0)
                return false;

        if (bits >= sizeof(num) * CHAR_BIT)
                return num != 0;

        return ((num << bits) >> 1) != (num << (bits - 1));
}
73
74 /* When min or max change, we may need to shuffle the frequency counts. */
75 static void renormalize(struct tally *tally,
76                         ssize_t new_min, ssize_t new_max)
77 {
78         size_t range, spill;
79         unsigned int i, old_min;
80
81         /* Uninitialized?  Don't do anything... */
82         if (tally->max < tally->min)
83                 goto update;
84
85         /* If we don't have sufficient range, increase step bits until
86          * buckets cover entire range of ssize_t anyway. */
87         range = (new_max - new_min) + 1;
88         while (!shift_overflows(tally->buckets, tally->step_bits)
89                && range > ((size_t)tally->buckets << tally->step_bits)) {
90                 /* Collapse down. */
91                 for (i = 1; i < tally->buckets; i++) {
92                         tally->counts[i/2] += tally->counts[i];
93                         tally->counts[i] = 0;
94                 }
95                 tally->step_bits++;
96         }
97
98         /* Now if minimum has dropped, move buckets up. */
99         old_min = bucket_of(new_min, tally->step_bits, tally->min);
100         memmove(tally->counts + old_min,
101                 tally->counts,
102                 sizeof(tally->counts[0]) * (tally->buckets - old_min));
103         memset(tally->counts, 0, sizeof(tally->counts[0]) * old_min);
104
105         /* If we moved boundaries, adjust buckets to that ratio. */
106         spill = (tally->min - new_min) % (1 << tally->step_bits);
107         for (i = 0; i < tally->buckets-1; i++) {
108                 size_t adjust = (tally->counts[i] >> tally->step_bits) * spill;
109                 tally->counts[i] -= adjust;
110                 tally->counts[i+1] += adjust;
111         }
112
113 update:
114         tally->min = new_min;
115         tally->max = new_max;
116 }
117
/* Record one sample: widen min/max (renormalizing the buckets if the
 * range grew), fold val into the two-word running total, and bump the
 * count of the bucket val falls in. */
void tally_add(struct tally *tally, ssize_t val)
{
        ssize_t new_min = tally->min, new_max = tally->max;
        bool need_renormalize = false;

        if (val < tally->min) {
                new_min = val;
                need_renormalize = true;
        }
        if (val > tally->max) {
                new_max = val;
                need_renormalize = true;
        }
        if (need_renormalize)
                renormalize(tally, new_min, new_max);

        /* 128-bit arithmetic!  If we didn't want exact mean, we could just
         * pull it out of counts. */
        /* Wraparound of the (unsigned) low word detects carry/borrow. */
        if (val > 0 && tally->total[0] + val < tally->total[0])
                tally->total[1]++;  /* carry into high word */
        else if (val < 0 && tally->total[0] + val > tally->total[0])
                tally->total[1]--;  /* borrow from high word */
        tally->total[0] += val;
        tally->counts[bucket_of(tally->min, tally->step_bits, val)]++;
}
143
144 size_t tally_num(const struct tally *tally)
145 {
146         size_t i, num = 0;
147         for (i = 0; i < tally->buckets; i++)
148                 num += tally->counts[i];
149         return num;
150 }
151
/* Smallest value ever passed to tally_add() (before any add, this is
 * the "empty" sentinel set by tally_new()). */
ssize_t tally_min(const struct tally *tally)
{
        return tally->min;
}
156
/* Largest value ever passed to tally_add() (before any add, this is
 * the "empty" sentinel set by tally_new()). */
ssize_t tally_max(const struct tally *tally)
{
        return tally->max;
}
161
/* FIXME: Own ccan module please! */
/* "Find last set": 1-based index of the most significant set bit of
 * val, or 0 if val is 0 (e.g. fls64(1) == 1, fls64(8) == 4). */
static unsigned fls64(uint64_t val)
{
#if HAVE_BUILTIN_CLZL
        if (val <= ULONG_MAX) {
                /* This is significantly faster! */
                return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
        } else {
#endif
        uint64_t r = 64;

        if (!val)
                return 0;
        /* Binary search: shift val left past its leading zeros, reducing
         * r by the number of positions skipped at each step. */
        if (!(val & 0xffffffff00000000ull)) {
                val <<= 32;
                r -= 32;
        }
        if (!(val & 0xffff000000000000ull)) {
                val <<= 16;
                r -= 16;
        }
        if (!(val & 0xff00000000000000ull)) {
                val <<= 8;
                r -= 8;
        }
        if (!(val & 0xf000000000000000ull)) {
                val <<= 4;
                r -= 4;
        }
        if (!(val & 0xc000000000000000ull)) {
                val <<= 2;
                r -= 2;
        }
        if (!(val & 0x8000000000000000ull)) {
                val <<= 1;
                r -= 1;
        }
        return r;
#if HAVE_BUILTIN_CLZL
        }
#endif
}
204
/* This is stolen straight from Hacker's Delight. */
/* Divide the 128-bit value u1:u0 (u1 = high word) by v, returning the
 * 64-bit quotient; returns (uint64_t)-1 if the quotient would not fit
 * (i.e. u1 >= v).  Schoolbook long division on 32-bit "digits". */
static uint64_t divlu64(uint64_t u1, uint64_t u0, uint64_t v)
{
        const uint64_t b = 4294967296ULL; // Number base (32 bits).
        uint32_t un[4],           // Dividend and divisor
                vn[2];            // normalized and broken
                                  // up into halfwords.
        uint32_t q[2];            // Quotient as halfwords.
        uint64_t un1, un0,        // Dividend and divisor
                vn0;              // as fullwords.
        uint64_t qhat;            // Estimated quotient digit.
        uint64_t rhat;            // A remainder.
        uint64_t p;               // Product of two digits.
        int64_t s, i, j, t, k;

        if (u1 >= v)              // If overflow, return the largest
                return (uint64_t)-1; // possible quotient.

        s = 64 - fls64(v);                // 0 <= s <= 63.
        vn0 = v << s;             // Normalize divisor.
        vn[1] = vn0 >> 32;        // Break divisor up into
        vn[0] = vn0 & 0xFFFFFFFF; // two 32-bit halves.

        // Shift dividend left.
        // NOTE(review): when s == 0 the "u0 >> (64 - s)" shifts by 64,
        // which is formally undefined; the "& (-s >> 63)" mask zeroes
        // the result but the shift itself remains — confirm against
        // the Hacker's Delight original.
        un1 = ((u1 << s) | (u0 >> (64 - s))) & (-s >> 63);
        un0 = u0 << s;
        un[3] = un1 >> 32;        // Break dividend up into
        un[2] = un1;              // four 32-bit halfwords
        un[1] = un0 >> 32;        // Note: storing into
        un[0] = un0;              // halfwords truncates.

        for (j = 1; j >= 0; j--) {
                // Compute estimate qhat of q[j].
                qhat = (un[j+2]*b + un[j+1])/vn[1];
                rhat = (un[j+2]*b + un[j+1]) - qhat*vn[1];
        again:
                if (qhat >= b || qhat*vn[0] > b*rhat + un[j]) {
                        qhat = qhat - 1;
                        rhat = rhat + vn[1];
                        if (rhat < b) goto again;
                }

                // Multiply and subtract.
                k = 0;
                for (i = 0; i < 2; i++) {
                        p = qhat*vn[i];
                        t = un[i+j] - k - (p & 0xFFFFFFFF);
                        un[i+j] = t;
                        k = (p >> 32) - (t >> 32);
                }
                t = un[j+2] - k;
                un[j+2] = t;

                q[j] = qhat;              // Store quotient digit.
                if (t < 0) {              // If we subtracted too
                        q[j] = q[j] - 1;  // much, add back.
                        k = 0;
                        for (i = 0; i < 2; i++) {
                                t = un[i+j] + vn[i] + k;
                                un[i+j] = t;
                                k = t >> 32;
                        }
                        un[j+2] = un[j+2] + k;
                }
        } // End j.

        return q[1]*b + q[0];
}
273
/* Signed counterpart of divlu64: divide the signed 128-bit value u1:u0
 * by v via absolute values, restoring the sign afterwards.  On
 * overflow, returns the most negative 64-bit value. */
static int64_t divls64(int64_t u1, uint64_t u0, int64_t v)
{
        int64_t q, uneg, vneg, diff, borrow;

        uneg = u1 >> 63;          // -1 if u < 0.
        if (uneg) {               // Compute the absolute
                u0 = -u0;         // value of the dividend u.
                borrow = (u0 != 0);
                u1 = -u1 - borrow;
        }

        vneg = v >> 63;           // -1 if v < 0.
        v = (v ^ vneg) - vneg;    // Absolute value of v.

        if ((uint64_t)u1 >= (uint64_t)v)
                goto overflow;

        q = divlu64(u1, u0, v);

        diff = uneg ^ vneg;       // Negate q if signs of
        q = (q ^ diff) - diff;    // u and v differed.

        if ((diff ^ q) < 0 && q != 0) {    // If overflow, return the largest
        overflow:                          // possible neg. quotient.
                q = 0x8000000000000000ULL;
        }
        return q;
}
302
303 ssize_t tally_mean(const struct tally *tally)
304 {
305         size_t count = tally_num(tally);
306         if (!count)
307                 return 0;
308
309         if (sizeof(tally->total[0]) == sizeof(uint32_t)) {
310                 /* Use standard 64-bit arithmetic. */
311                 int64_t total = tally->total[0]
312                         | (((uint64_t)tally->total[1]) << 32);
313                 return total / count;
314         }
315         return divls64(tally->total[1], tally->total[0], count);
316 }
317
/* Running total of all samples.  If overflow is non-NULL, *overflow
 * receives the high word and the raw low word is returned.  Otherwise
 * the result is clamped to the representable ssize_t range when the
 * two-word total cannot be expressed in one ssize_t. */
ssize_t tally_total(const struct tally *tally, ssize_t *overflow)
{
        if (overflow) {
                *overflow = tally->total[1];
                return tally->total[0];
        }

        /* If result is negative, make sure we can represent it. */
        if (tally->total[1] & ((size_t)1 << (SIZET_BITS-1))) {
                /* Must have only underflowed once, and must be able to
                 * represent result at ssize_t. */
                /* i.e. high word must be exactly -1 (all ones) and the
                 * low word negative as an ssize_t; otherwise clamp. */
                if ((~tally->total[1])+1 != 0
                    || (ssize_t)tally->total[0] >= 0) {
                        /* Underflow, return minimum. */
                        return (ssize_t)((size_t)1 << (SIZET_BITS - 1));
                }
        } else {
                /* Result is positive, must not have overflowed, and must be
                 * able to represent as ssize_t. */
                if (tally->total[1] || (ssize_t)tally->total[0] < 0) {
                        /* Overflow.  Return maximum. */
                        return (ssize_t)~((size_t)1 << (SIZET_BITS - 1));
                }
        }
        return tally->total[0];
}
344
345 static ssize_t bucket_range(const struct tally *tally, unsigned b, size_t *err)
346 {
347         ssize_t min, max;
348
349         min = bucket_min(tally->min, tally->step_bits, b);
350         if (b == tally->buckets - 1)
351                 max = tally->max;
352         else
353                 max = bucket_min(tally->min, tally->step_bits, b+1) - 1;
354
355         /* FIXME: Think harder about cumulative error; is this enough?. */
356         *err = (max - min + 1) / 2;
357         /* Avoid overflow. */
358         return min + (max - min) / 2;
359 }
360
361 ssize_t tally_approx_median(const struct tally *tally, size_t *err)
362 {
363         size_t count = tally_num(tally), total = 0;
364         unsigned int i;
365
366         for (i = 0; i < tally->buckets; i++) {
367                 total += tally->counts[i];
368                 if (total * 2 >= count)
369                         break;
370         }
371         return bucket_range(tally, i, err);
372 }
373
374 ssize_t tally_approx_mode(const struct tally *tally, size_t *err)
375 {
376         unsigned int i, min_best = 0, max_best = 0;
377
378         for (i = 0; i < tally->buckets; i++) {
379                 if (tally->counts[i] > tally->counts[min_best]) {
380                         min_best = max_best = i;
381                 } else if (tally->counts[i] == tally->counts[min_best]) {
382                         max_best = i;
383                 }
384         }
385
386         /* We can have more than one best, making our error huge. */
387         if (min_best != max_best) {
388                 ssize_t min, max;
389                 min = bucket_range(tally, min_best, err);
390                 max = bucket_range(tally, max_best, err);
391                 max += *err;
392                 *err += (size_t)(max - min);
393                 return min + (max - min) / 2;
394         }
395
396         return bucket_range(tally, min_best, err);
397 }
398
399 static unsigned get_max_bucket(const struct tally *tally)
400 {
401         unsigned int i;
402
403         for (i = tally->buckets; i > 0; i--)
404                 if (tally->counts[i-1])
405                         break;
406         return i;
407 }
408
/* Render an ASCII-art histogram of the tally, one bucket per text row
 * with the minimum value at the bottom, at most width columns and
 * height rows.  Returns a malloc'd NUL-terminated string ('\n' after
 * each row), or NULL on allocation failure.  Caller frees. */
char *tally_histogram(const struct tally *tally,
                      unsigned width, unsigned height)
{
        unsigned int i, count, max_bucket, largest_bucket;
        struct tally *tmp;
        char *graph, *p;

        assert(width >= TALLY_MIN_HISTO_WIDTH);
        assert(height >= TALLY_MIN_HISTO_HEIGHT);

        /* Ignore unused buckets. */
        max_bucket = get_max_bucket(tally);

        /* FIXME: It'd be nice to smooth here... */
        if (height >= max_bucket) {
                height = max_bucket;
                tmp = NULL;
        } else {
                /* We create a temporary then renormalize so < height. */
                /* FIXME: Antialias properly! */
                tmp = tally_new(tally->buckets);
                if (!tmp)
                        return NULL;
                tmp->min = tally->min;
                tmp->max = tally->max;
                tmp->step_bits = tally->step_bits;
                memcpy(tmp->counts, tally->counts,
                       sizeof(tally->counts[0]) * tmp->buckets);
                /* Doubling max widens the bucket step, collapsing pairs
                 * of buckets, until few enough rows remain in use. */
                while ((max_bucket = get_max_bucket(tmp)) >= height)
                        renormalize(tmp, tmp->min, tmp->max * 2);
                /* Restore max */
                tmp->max = tally->max;
                tally = tmp;  /* render from the collapsed copy */
                height = max_bucket;
        }

        /* Figure out longest line, for scale. */
        largest_bucket = 0;
        for (i = 0; i < tally->buckets; i++) {
                if (tally->counts[i] > largest_bucket)
                        largest_bucket = tally->counts[i];
        }

        /* height rows of up to width chars, '\n' each, plus final NUL. */
        p = graph = malloc(height * (width + 1) + 1);
        if (!graph) {
                free(tmp);
                return NULL;
        }

        for (i = 0; i < height; i++) {
                unsigned covered = 1, row;

                /* People expect minimum at the bottom. */
                row = height - i - 1;
                count = (double)tally->counts[row] / largest_bucket * (width-1)+1;

                /* Left margin: numeric labels on the bottom/top rows, a
                 * '+' on the row whose bucket contains the value 0, and
                 * '|' elsewhere. */
                if (row == 0)
                        covered = snprintf(p, width, "%zi", tally->min);
                else if (row == height - 1)
                        covered = snprintf(p, width, "%zi", tally->max);
                else if (row == bucket_of(tally->min, tally->step_bits, 0))
                        *p = '+';
                else
                        *p = '|';

                /* snprintf returns the would-be length; clamp to width.
                 * NOTE(review): if a label is truncated, the NUL that
                 * snprintf wrote at p[width-1] is skipped over and left
                 * embedded in the output row — confirm whether that is
                 * intended. */
                if (covered > width)
                        covered = width;
                p += covered;

                /* Bar length, less whatever the margin already used. */
                if (count > covered)
                        count -= covered;
                else
                        count = 0;

                memset(p, '*', count);
                p += count;
                *p = '\n';
                p++;
        }
        *p = '\0';
        free(tmp);
        return graph;
}