tally: fix FreeBSD compile, memleak in tests.
[ccan] / ccan / tally / tally.c
1 #include <ccan/tally/tally.h>
2 #include <ccan/build_assert/build_assert.h>
3 #include <ccan/likely/likely.h>
4 #include <stdint.h>
5 #include <limits.h>
6 #include <string.h>
7 #include <stdio.h>
8 #include <assert.h>
9 #include <stdlib.h>
10
11 #define SIZET_BITS (sizeof(size_t)*CHAR_BIT)
12
/* We use power of 2 steps.  I tried being tricky, but it got buggy. */
struct tally {
	/* Smallest/largest value seen.  tally_new() initializes min to the
	 * most-positive and max to the most-negative ssize_t, so min > max
	 * marks an empty tally (see renormalize()). */
	ssize_t min, max;
	/* 128-bit running sum: total[0] is the low word, total[1] the high
	 * word (carry/borrow maintained by tally_add()). */
	size_t total[2];
	/* This allows limited frequency analysis. */
	/* Each bucket spans (1 << step_bits) consecutive values from min. */
	unsigned buckets, step_bits;
	/* Over-allocated in tally_new() to hold `buckets` entries
	 * (pre-C99 flexible-array idiom). */
	size_t counts[1 /* Actually: [buckets] */ ];
};
21
22 struct tally *tally_new(unsigned buckets)
23 {
24         struct tally *tally;
25
26         /* There is always 1 bucket. */
27         if (buckets == 0)
28                 buckets = 1;
29
30         /* Check for overflow. */
31         if (buckets && SIZE_MAX / buckets < sizeof(tally->counts[0]))
32                 return NULL;
33         tally = malloc(sizeof(*tally) + sizeof(tally->counts[0])*(buckets-1));
34         if (tally) {
35                 tally->max = ((size_t)1 << (SIZET_BITS - 1));
36                 tally->min = ~tally->max;
37                 tally->total[0] = tally->total[1] = 0;
38                 tally->buckets = buckets;
39                 tally->step_bits = 0;
40                 memset(tally->counts, 0, sizeof(tally->counts[0])*buckets);
41         }
42         return tally;
43 }
44
45 static unsigned bucket_of(ssize_t min, unsigned step_bits, ssize_t val)
46 {
47         /* Don't over-shift. */
48         if (step_bits == SIZET_BITS)
49                 return 0;
50         assert(step_bits < SIZET_BITS);
51         return (size_t)(val - min) >> step_bits;
52 }
53
54 /* Return the min value in bucket b. */
55 static ssize_t bucket_min(ssize_t min, unsigned step_bits, unsigned b)
56 {
57         /* Don't over-shift. */
58         if (step_bits == SIZET_BITS)
59                 return min;
60         assert(step_bits < SIZET_BITS);
61         return min + ((ssize_t)b << step_bits);
62 }
63
/* Does shifting by this many bits truncate the number? */
static bool shift_overflows(size_t num, unsigned bits)
{
	size_t all_but_one, undone;

	if (bits == 0)
		return false;

	/* Shift all but the last bit, then compare against shifting the
	 * whole way and backing off one: they differ iff the final
	 * position lost a set bit. */
	all_but_one = num << (bits - 1);
	undone = (num << bits) >> 1;
	return undone != all_but_one;
}
72
73 /* When min or max change, we may need to shuffle the frequency counts. */
74 static void renormalize(struct tally *tally,
75                         ssize_t new_min, ssize_t new_max)
76 {
77         size_t range, spill;
78         unsigned int i, old_min;
79
80         /* Uninitialized?  Don't do anything... */
81         if (tally->max < tally->min)
82                 goto update;
83
84         /* If we don't have sufficient range, increase step bits until
85          * buckets cover entire range of ssize_t anyway. */
86         range = (new_max - new_min) + 1;
87         while (!shift_overflows(tally->buckets, tally->step_bits)
88                && range > ((size_t)tally->buckets << tally->step_bits)) {
89                 /* Collapse down. */
90                 for (i = 1; i < tally->buckets; i++) {
91                         tally->counts[i/2] += tally->counts[i];
92                         tally->counts[i] = 0;
93                 }
94                 tally->step_bits++;
95         }
96
97         /* Now if minimum has dropped, move buckets up. */
98         old_min = bucket_of(new_min, tally->step_bits, tally->min);
99         memmove(tally->counts + old_min,
100                 tally->counts,
101                 sizeof(tally->counts[0]) * (tally->buckets - old_min));
102         memset(tally->counts, 0, sizeof(tally->counts[0]) * old_min);
103
104         /* If we moved boundaries, adjust buckets to that ratio. */
105         spill = (tally->min - new_min) % (1 << tally->step_bits);
106         for (i = 0; i < tally->buckets-1; i++) {
107                 size_t adjust = (tally->counts[i] >> tally->step_bits) * spill;
108                 tally->counts[i] -= adjust;
109                 tally->counts[i+1] += adjust;
110         }
111
112 update:
113         tally->min = new_min;
114         tally->max = new_max;
115 }
116
/* Record one sample.  Widens min/max (renormalizing the buckets if
 * needed) and maintains the exact 128-bit running total. */
void tally_add(struct tally *tally, ssize_t val)
{
	ssize_t new_min = tally->min, new_max = tally->max;
	bool need_renormalize = false;

	if (val < tally->min) {
		new_min = val;
		need_renormalize = true;
	}
	if (val > tally->max) {
		new_max = val;
		need_renormalize = true;
	}
	/* Must happen before bucket_of() below: it may change min/step_bits. */
	if (need_renormalize)
		renormalize(tally, new_min, new_max);

	/* 128-bit arithmetic!  If we didn't want exact mean, we could just
	 * pull it out of counts. */
	/* Carry/borrow detection on the low word: unsigned addition wrapped
	 * iff the sum moved the "wrong" way relative to val's sign. */
	if (val > 0 && tally->total[0] + val < tally->total[0])
		tally->total[1]++;
	else if (val < 0 && tally->total[0] + val > tally->total[0])
		tally->total[1]--;
	tally->total[0] += val;
	tally->counts[bucket_of(tally->min, tally->step_bits, val)]++;
}
142
143 size_t tally_num(const struct tally *tally)
144 {
145         size_t i, num = 0;
146         for (i = 0; i < tally->buckets; i++)
147                 num += tally->counts[i];
148         return num;
149 }
150
/* Smallest value added so far.  Before any tally_add() this is the
 * sentinel set by tally_new() (the most-positive ssize_t). */
ssize_t tally_min(const struct tally *tally)
{
	return tally->min;
}
155
/* Largest value added so far.  Before any tally_add() this is the
 * sentinel set by tally_new() (the most-negative ssize_t). */
ssize_t tally_max(const struct tally *tally)
{
	return tally->max;
}
160
/* FIXME: Own ccan module please! */
/* Find last set: 1-based index of the highest set bit, or 0 if val == 0.
 * So fls64(1) == 1 and fls64(1ULL << 63) == 64. */
static unsigned fls64(uint64_t val)
{
	unsigned int bits, chunk;

#if HAVE_BUILTIN_CLZL
	if (val <= ULONG_MAX) {
		/* This is significantly faster! */
		return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
	}
#endif
	if (!val)
		return 0;

	/* Binary search: whenever the top `chunk` bits are all clear,
	 * shift the value up and drop the answer by that much. */
	bits = 64;
	for (chunk = 32; chunk > 0; chunk /= 2) {
		if (!(val >> (64 - chunk))) {
			val <<= chunk;
			bits -= chunk;
		}
	}
	return bits;
}
203
/* This is stolen straight from Hacker's Delight. */
/* Unsigned 128/64 division: divide u1:u0 (u1 is the high word) by v,
 * returning the 64-bit quotient.  On overflow (u1 >= v, which includes
 * v == 0) returns the largest possible quotient, (uint64_t)-1. */
static uint64_t divlu64(uint64_t u1, uint64_t u0, uint64_t v)
{
	const uint64_t b = 4294967296ULL; // Number base (32 bits).
	uint32_t un[4],           // Dividend and divisor
		vn[2];            // normalized and broken
				  // up into halfwords.
	uint32_t q[2];            // Quotient as halfwords.
	uint64_t un1, un0,        // Dividend and divisor
		vn0;              // as fullwords.
	uint64_t qhat;            // Estimated quotient digit.
	uint64_t rhat;            // A remainder.
	uint64_t p;               // Product of two digits.
	int64_t s, i, j, t, k;

	if (u1 >= v)              // If overflow, return the largest
		return (uint64_t)-1; // possible quotient.

	s = 64 - fls64(v);                // 0 <= s <= 63 (v != 0 here).
	vn0 = v << s;             // Normalize divisor.
	vn[1] = vn0 >> 32;        // Break divisor up into
	vn[0] = vn0 & 0xFFFFFFFF; // two 32-bit halves.

	// Shift dividend left.  Handle s == 0 explicitly: the old
	// "(u0 >> (64 - s)) & (-s >> 63)" form shifts by 64 (undefined
	// behaviour), and the mask was mis-parenthesized around the whole
	// expression, zeroing u1 instead of just the u0 term.
	if (s != 0)
		un1 = (u1 << s) | (u0 >> (64 - s));
	else
		un1 = u1;
	un0 = u0 << s;
	un[3] = un1 >> 32;        // Break dividend up into
	un[2] = un1;              // four 32-bit halfwords
	un[1] = un0 >> 32;        // Note: storing into
	un[0] = un0;              // halfwords truncates.

	for (j = 1; j >= 0; j--) {
		// Compute estimate qhat of q[j].
		qhat = (un[j+2]*b + un[j+1])/vn[1];
		rhat = (un[j+2]*b + un[j+1]) - qhat*vn[1];
	again:
		if (qhat >= b || qhat*vn[0] > b*rhat + un[j]) {
			qhat = qhat - 1;
			rhat = rhat + vn[1];
			if (rhat < b) goto again;
		}

		// Multiply and subtract.
		k = 0;
		for (i = 0; i < 2; i++) {
			p = qhat*vn[i];
			t = un[i+j] - k - (p & 0xFFFFFFFF);
			un[i+j] = t;
			k = (p >> 32) - (t >> 32);
		}
		t = un[j+2] - k;
		un[j+2] = t;

		q[j] = qhat;              // Store quotient digit.
		if (t < 0) {              // If we subtracted too
			q[j] = q[j] - 1;  // much, add back.
			k = 0;
			for (i = 0; i < 2; i++) {
				t = un[i+j] + vn[i] + k;
				un[i+j] = t;
				k = t >> 32;
			}
			un[j+2] = un[j+2] + k;
		}
	} // End j.

	return q[1]*b + q[0];
}
272
/* Signed 128/64 division (from Hacker's Delight): divide u1:u0 by v.
 * Works by taking absolute values, dividing unsigned via divlu64(),
 * then restoring the sign; overflow yields the most negative quotient. */
static int64_t divls64(int64_t u1, uint64_t u0, int64_t v)
{
	int64_t q, uneg, vneg, diff, borrow;

	uneg = u1 >> 63;          // -1 if u < 0.
	if (uneg) {               // Compute the absolute
		u0 = -u0;         // value of the dividend u.
		borrow = (u0 != 0); // negating the low word borrows from
		u1 = -u1 - borrow;  // the high word unless it was zero.
	}

	vneg = v >> 63;           // -1 if v < 0.
	v = (v ^ vneg) - vneg;    // Absolute value of v.

	if ((uint64_t)u1 >= (uint64_t)v)
		goto overflow;

	q = divlu64(u1, u0, v);

	diff = uneg ^ vneg;       // Negate q if signs of
	q = (q ^ diff) - diff;    // u and v differed.

	if ((diff ^ q) < 0 && q != 0) {    // If overflow, return the largest
	overflow:                          // possible neg. quotient.
		q = 0x8000000000000000ULL;
	}
	return q;
}
301
/* Exact mean of all added values; 0 if nothing has been added. */
ssize_t tally_mean(const struct tally *tally)
{
	size_t count = tally_num(tally);
	if (!count)
		return 0;

	/* Compile-time 32-vs-64-bit size_t dispatch; the untaken branch
	 * is dead code the compiler can drop. */
	if (sizeof(tally->total[0]) == sizeof(uint32_t)) {
		/* Use standard 64-bit arithmetic: the two 32-bit total
		 * words pack into one int64_t. */
		int64_t total = tally->total[0]
			| (((uint64_t)tally->total[1]) << 32);
		return total / count;
	}
	/* 64-bit size_t: needs a real signed 128/64 divide. */
	return divls64(tally->total[1], tally->total[0], count);
}
316
317 ssize_t tally_total(const struct tally *tally, ssize_t *overflow)
318 {
319         if (overflow) {
320                 *overflow = tally->total[1];
321                 return tally->total[0];
322         }
323
324         /* If result is negative, make sure we can represent it. */
325         if (tally->total[1] & ((size_t)1 << (SIZET_BITS-1))) {
326                 /* Must have only underflowed once, and must be able to
327                  * represent result at ssize_t. */
328                 if ((~tally->total[1])+1 != 0
329                     || (ssize_t)tally->total[0] >= 0) {
330                         /* Underflow, return minimum. */
331                         return (ssize_t)((size_t)1 << (SIZET_BITS - 1));
332                 }
333         } else {
334                 /* Result is positive, must not have overflowed, and must be
335                  * able to represent as ssize_t. */
336                 if (tally->total[1] || (ssize_t)tally->total[0] < 0) {
337                         /* Overflow.  Return maximum. */
338                         return (ssize_t)~((size_t)1 << (SIZET_BITS - 1));
339                 }
340         }
341         return tally->total[0];
342 }
343
344 static ssize_t bucket_range(const struct tally *tally, unsigned b, size_t *err)
345 {
346         ssize_t min, max;
347
348         min = bucket_min(tally->min, tally->step_bits, b);
349         if (b == tally->buckets - 1)
350                 max = tally->max;
351         else
352                 max = bucket_min(tally->min, tally->step_bits, b+1) - 1;
353
354         /* FIXME: Think harder about cumulative error; is this enough?. */
355         *err = (max - min + 1) / 2;
356         /* Avoid overflow. */
357         return min + (max - min) / 2;
358 }
359
360 ssize_t tally_approx_median(const struct tally *tally, size_t *err)
361 {
362         size_t count = tally_num(tally), total = 0;
363         unsigned int i;
364
365         for (i = 0; i < tally->buckets; i++) {
366                 total += tally->counts[i];
367                 if (total * 2 >= count)
368                         break;
369         }
370         return bucket_range(tally, i, err);
371 }
372
/* Approximate mode: midpoint of the fullest bucket; *err reflects the
 * bucket span, widened when several buckets tie for fullest. */
ssize_t tally_approx_mode(const struct tally *tally, size_t *err)
{
	unsigned int i, min_best = 0, max_best = 0;

	/* min_best/max_best track the first and last bucket holding the
	 * highest count seen so far. */
	for (i = 0; i < tally->buckets; i++) {
		if (tally->counts[i] > tally->counts[min_best]) {
			min_best = max_best = i;
		} else if (tally->counts[i] == tally->counts[min_best]) {
			max_best = i;
		}
	}

	/* We can have more than one best, making our error huge. */
	if (min_best != max_best) {
		ssize_t min, max;
		/* Note: the first bucket_range's *err is overwritten by the
		 * second; max is pushed to the top edge of its bucket before
		 * the combined error is accumulated. */
		min = bucket_range(tally, min_best, err);
		max = bucket_range(tally, max_best, err);
		max += *err;
		*err += (size_t)(max - min);
		return min + (max - min) / 2;
	}

	return bucket_range(tally, min_best, err);
}
397
398 static unsigned get_max_bucket(const struct tally *tally)
399 {
400         unsigned int i;
401
402         for (i = tally->buckets; i > 0; i--)
403                 if (tally->counts[i-1])
404                         break;
405         return i;
406 }
407
/* Render an ASCII histogram, one bucket per row, minimum value at the
 * bottom.  Returns a freshly malloc'd NUL-terminated string (caller
 * frees), or NULL on allocation failure. */
char *tally_histogram(const struct tally *tally,
		      unsigned width, unsigned height)
{
	unsigned int i, count, max_bucket, largest_bucket;
	struct tally *tmp;
	char *graph, *p;

	assert(width >= TALLY_MIN_HISTO_WIDTH);
	assert(height >= TALLY_MIN_HISTO_HEIGHT);

	/* Ignore unused buckets. */
	max_bucket = get_max_bucket(tally);

	/* FIXME: It'd be nice to smooth here... */
	if (height >= max_bucket) {
		height = max_bucket;
		tmp = NULL;
	} else {
		/* We create a temporary then renormalize so < height. */
		/* FIXME: Antialias properly! */
		tmp = tally_new(tally->buckets);
		if (!tmp)
			return NULL;
		tmp->min = tally->min;
		tmp->max = tally->max;
		tmp->step_bits = tally->step_bits;
		memcpy(tmp->counts, tally->counts,
		       sizeof(tally->counts[0]) * tmp->buckets);
		/* Doubling max each pass coarsens the buckets until the
		 * used range fits in the requested height. */
		while ((max_bucket = get_max_bucket(tmp)) >= height)
			renormalize(tmp, tmp->min, tmp->max * 2);
		/* Restore max */
		tmp->max = tally->max;
		/* From here on, read from the coarsened copy.  tmp is kept
		 * so it can be freed before returning. */
		tally = tmp;
		height = max_bucket;
	}

	/* Figure out longest line, for scale. */
	largest_bucket = 0;
	for (i = 0; i < tally->buckets; i++) {
		if (tally->counts[i] > largest_bucket)
			largest_bucket = tally->counts[i];
	}

	/* height rows of (width chars + newline), plus trailing NUL. */
	p = graph = malloc(height * (width + 1) + 1);
	if (!graph) {
		free(tmp);
		return NULL;
	}

	for (i = 0; i < height; i++) {
		unsigned covered = 1, row;

		/* People expect minimum at the bottom. */
		row = height - i - 1;
		/* Scale this bucket's count to a bar of 1..width stars. */
		count = (double)tally->counts[row] / largest_bucket * (width-1)+1;

		/* Left margin: numeric labels on the extreme rows, '+' on
		 * the row holding value 0, '|' elsewhere. */
		if (row == 0)
			covered = snprintf(p, width, "%zi", tally->min);
		else if (row == height - 1)
			covered = snprintf(p, width, "%zi", tally->max);
		else if (row == bucket_of(tally->min, tally->step_bits, 0))
			*p = '+';
		else
			*p = '|';

		/* snprintf returns the untruncated length; clamp it. */
		if (covered > width)
			covered = width;
		p += covered;

		/* The label eats into the bar's budget. */
		if (count > covered)
			count -= covered;
		else
			count = 0;

		memset(p, '*', count);
		p += count;
		*p = '\n';
		p++;
	}
	*p = '\0';
	free(tmp);
	return graph;
}