tally: Adapt tally_num to Samba coding conventions
[ccan] / ccan / tally / tally.c
1 /* Licensed under LGPLv3+ - see LICENSE file for details */
2 #include <ccan/tally/tally.h>
3 #include <ccan/build_assert/build_assert.h>
4 #include <ccan/likely/likely.h>
5 #include <stdint.h>
6 #include <limits.h>
7 #include <string.h>
8 #include <stdio.h>
9 #include <assert.h>
10 #include <stdlib.h>
11
12 #define SIZET_BITS (sizeof(size_t)*CHAR_BIT)
13
/* We use power of 2 steps.  I tried being tricky, but it got buggy. */
struct tally {
	/* Smallest and largest values seen so far.  A fresh tally has
	 * min > max, which marks it as "no samples yet" (see renormalize). */
	ssize_t min, max;
	/* 128-bit running sum of all samples: total[0] is the low word,
	 * total[1] the high word (carry/borrow maintained by tally_add). */
	size_t total[2];
	/* This allows limited frequency analysis. */
	/* Number of buckets, and log2 of each bucket's width in values. */
	unsigned buckets, step_bits;
	/* Per-bucket sample counts; the allocation in tally_new() extends
	 * this array so there are really 'buckets' entries. */
	size_t counts[1 /* Actually: [buckets] */ ];
};
22
23 struct tally *tally_new(unsigned buckets)
24 {
25         struct tally *tally;
26
27         /* There is always 1 bucket. */
28         if (buckets == 0) {
29                 buckets = 1;
30         }
31
32         /* Overly cautious check for overflow. */
33         if (sizeof(*tally) * buckets / sizeof(*tally) != buckets) {
34                 return NULL;
35         }
36
37         tally = (struct tally *)malloc(
38                 sizeof(*tally) + sizeof(tally->counts[0])*(buckets-1));
39         if (tally == NULL) {
40                 return NULL;
41         }
42
43         tally->max = ((size_t)1 << (SIZET_BITS - 1));
44         tally->min = ~tally->max;
45         tally->total[0] = tally->total[1] = 0;
46         tally->buckets = buckets;
47         tally->step_bits = 0;
48         memset(tally->counts, 0, sizeof(tally->counts[0])*buckets);
49         return tally;
50 }
51
/* Which bucket does val fall into, given the tally's min and step_bits? */
static unsigned bucket_of(ssize_t min, unsigned step_bits, ssize_t val)
{
	const unsigned size_t_bits = sizeof(size_t) * CHAR_BIT;

	/* A single bucket spanning everything: shifting by the full word
	 * width would be undefined, so answer directly. */
	if (step_bits == size_t_bits) {
		return 0;
	}
	assert(step_bits < size_t_bits);

	/* Distance from min, in units of 2^step_bits. */
	return (size_t)(val - min) >> step_bits;
}
61
/* Return the min value in bucket b. */
static ssize_t bucket_min(ssize_t min, unsigned step_bits, unsigned b)
{
	const unsigned size_t_bits = sizeof(size_t) * CHAR_BIT;

	/* One bucket covering everything starts at min itself; shifting
	 * by the full word width would be undefined. */
	if (step_bits == size_t_bits) {
		return min;
	}
	assert(step_bits < size_t_bits);

	/* Each bucket is 2^step_bits values wide. */
	return min + ((ssize_t)b << step_bits);
}
72
/* Does shifting by this many bits truncate the number? */
static bool shift_overflows(size_t num, unsigned bits)
{
	size_t partial;

	if (bits == 0) {
		return false;
	}

	/* num << (bits - 1) keeps one spare bit at the top; if the full
	 * shift, brought back down by one, disagrees with it, then a bit
	 * fell off the end. */
	partial = num << (bits - 1);
	return partial != ((num << bits) >> 1);
}
82
83 /* When min or max change, we may need to shuffle the frequency counts. */
84 static void renormalize(struct tally *tally,
85                         ssize_t new_min, ssize_t new_max)
86 {
87         size_t range, spill;
88         unsigned int i, old_min;
89
90         /* Uninitialized?  Don't do anything... */
91         if (tally->max < tally->min) {
92                 goto update;
93         }
94
95         /* If we don't have sufficient range, increase step bits until
96          * buckets cover entire range of ssize_t anyway. */
97         range = (new_max - new_min) + 1;
98         while (!shift_overflows(tally->buckets, tally->step_bits)
99                && range > ((size_t)tally->buckets << tally->step_bits)) {
100                 /* Collapse down. */
101                 for (i = 1; i < tally->buckets; i++) {
102                         tally->counts[i/2] += tally->counts[i];
103                         tally->counts[i] = 0;
104                 }
105                 tally->step_bits++;
106         }
107
108         /* Now if minimum has dropped, move buckets up. */
109         old_min = bucket_of(new_min, tally->step_bits, tally->min);
110         memmove(tally->counts + old_min,
111                 tally->counts,
112                 sizeof(tally->counts[0]) * (tally->buckets - old_min));
113         memset(tally->counts, 0, sizeof(tally->counts[0]) * old_min);
114
115         /* If we moved boundaries, adjust buckets to that ratio. */
116         spill = (tally->min - new_min) % (1 << tally->step_bits);
117         for (i = 0; i < tally->buckets-1; i++) {
118                 size_t adjust = (tally->counts[i] >> tally->step_bits) * spill;
119                 tally->counts[i] -= adjust;
120                 tally->counts[i+1] += adjust;
121         }
122
123 update:
124         tally->min = new_min;
125         tally->max = new_max;
126 }
127
/**
 * tally_add - record one sample value.
 * @tally: the tally to update.
 * @val: the sample.
 *
 * Widens [min, max] (renormalizing the buckets if needed), folds val
 * into the 128-bit running total, and bumps the matching bucket count.
 */
void tally_add(struct tally *tally, ssize_t val)
{
	ssize_t new_min = tally->min, new_max = tally->max;
	bool need_renormalize = false;

	if (val < tally->min) {
		new_min = val;
		need_renormalize = true;
	}
	if (val > tally->max) {
		new_max = val;
		need_renormalize = true;
	}
	if (need_renormalize) {
		renormalize(tally, new_min, new_max);
	}

	/* 128-bit arithmetic!  If we didn't want exact mean, we could just
	 * pull it out of counts. */
	/* Detect carry/borrow out of the low word: adding a positive val
	 * that wraps the unsigned sum downward means a carry into the
	 * high word; a negative val that wraps it upward means a borrow. */
	if (val > 0 && tally->total[0] + val < tally->total[0]) {
		tally->total[1]++;
	} else if (val < 0 && tally->total[0] + val > tally->total[0]) {
		tally->total[1]--;
	}
	tally->total[0] += val;
	tally->counts[bucket_of(tally->min, tally->step_bits, val)]++;
}
155
156 size_t tally_num(const struct tally *tally)
157 {
158         size_t i, num = 0;
159         for (i = 0; i < tally->buckets; i++) {
160                 num += tally->counts[i];
161         }
162         return num;
163 }
164
/* Smallest value passed to tally_add() so far (a sentinel larger than
 * any sample while the tally is still empty — see tally_new). */
ssize_t tally_min(const struct tally *tally)
{
	return tally->min;
}
169
/* Largest value passed to tally_add() so far (a sentinel smaller than
 * any sample while the tally is still empty — see tally_new). */
ssize_t tally_max(const struct tally *tally)
{
	return tally->max;
}
174
/* FIXME: Own ccan module please! */
/* Find last (highest) set bit, 1-based: fls64(1) == 1, fls64(0) == 0. */
static unsigned fls64(uint64_t val)
{
#if HAVE_BUILTIN_CLZL
	if (val <= ULONG_MAX) {
		/* This is significantly faster! */
		return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
	}
#endif
	{
		/* Binary search for the top set bit: for each all-zero
		 * top chunk, shift it out and reduce the answer. */
		unsigned r = 64;

		if (val == 0) {
			return 0;
		}
		if ((val >> 32) == 0) {
			val <<= 32;
			r -= 32;
		}
		if ((val >> 48) == 0) {
			val <<= 16;
			r -= 16;
		}
		if ((val >> 56) == 0) {
			val <<= 8;
			r -= 8;
		}
		if ((val >> 60) == 0) {
			val <<= 4;
			r -= 4;
		}
		if ((val >> 62) == 0) {
			val <<= 2;
			r -= 2;
		}
		if ((val >> 63) == 0) {
			val <<= 1;
			r -= 1;
		}
		return r;
	}
}
217
/* This is stolen straight from Hacker's Delight (divlu2, 64-bit form). */
/*
 * Divide the 128-bit value u1:u0 by v, returning the 64-bit quotient.
 * Requires u1 < v for the quotient to fit; otherwise (including v == 0)
 * returns (uint64_t)-1 to signal overflow.
 */
static uint64_t divlu64(uint64_t u1, uint64_t u0, uint64_t v)
{
	const uint64_t b = 4294967296ULL; // Number base (32 bits).
	uint32_t un[4],           // Dividend and divisor
		vn[2];            // normalized and broken
				  // up into halfwords.
	uint32_t q[2];            // Quotient as halfwords.
	uint64_t un1, un0,        // Dividend and divisor
		vn0;              // as fullwords.
	uint64_t qhat;            // Estimated quotient digit.
	uint64_t rhat;            // A remainder.
	uint64_t p;               // Product of two digits.
	int64_t s, i, j, t, k;

	if (u1 >= v)              // If overflow, return the largest
		return (uint64_t)-1; // possible quotient.

	// Normalization shift s = leading zeros of v (0 <= s <= 63).
	// v != 0 here, since v == 0 hits the "u1 >= v" return above.
	// (Equivalent to the old "s = 64 - fls64(v)".)
	for (s = 0; !(v & 0x8000000000000000ULL); s++)
		v <<= 1;
	vn0 = v;                  // Normalized divisor.
	vn[1] = vn0 >> 32;        // Break divisor up into
	vn[0] = vn0 & 0xFFFFFFFF; // two 32-bit halves.

	// Shift dividend left.  When s == 0, "u0 >> (64 - s)" would be an
	// undefined 64-bit shift -- and the old whole-expression mask
	// "& (-s >> 63)" wrongly zeroed u1 too (Hacker's Delight masks
	// only the u0 spill term) -- so handle s == 0 explicitly.
	if (s == 0) {
		un1 = u1;
		un0 = u0;
	} else {
		un1 = (u1 << s) | (u0 >> (64 - s));
		un0 = u0 << s;
	}
	un[3] = un1 >> 32;        // Break dividend up into
	un[2] = un1;              // four 32-bit halfwords
	un[1] = un0 >> 32;        // Note: storing into
	un[0] = un0;              // halfwords truncates.

	for (j = 1; j >= 0; j--) {
		// Compute estimate qhat of q[j].
		qhat = (un[j+2]*b + un[j+1])/vn[1];
		rhat = (un[j+2]*b + un[j+1]) - qhat*vn[1];
	again:
		if (qhat >= b || qhat*vn[0] > b*rhat + un[j]) {
			qhat = qhat - 1;
			rhat = rhat + vn[1];
			if (rhat < b) goto again;
		}

		// Multiply and subtract.
		k = 0;
		for (i = 0; i < 2; i++) {
			p = qhat*vn[i];
			t = un[i+j] - k - (p & 0xFFFFFFFF);
			un[i+j] = t;
			k = (p >> 32) - (t >> 32);
		}
		t = un[j+2] - k;
		un[j+2] = t;

		q[j] = qhat;              // Store quotient digit.
		if (t < 0) {              // If we subtracted too
			q[j] = q[j] - 1;  // much, add back.
			k = 0;
			for (i = 0; i < 2; i++) {
				t = un[i+j] + vn[i] + k;
				un[i+j] = t;
				k = t >> 32;
			}
			un[j+2] = un[j+2] + k;
		}
	} // End j.

	return q[1]*b + q[0];
}
286
/*
 * Signed 128-by-64 division: divide u1:u0 (u1 the signed high word, u0
 * the unsigned low word) by v, returning the 64-bit quotient.  On
 * overflow, returns the most negative value.
 * NOTE(review): relies on arithmetic (sign-extending) right shift of
 * negative int64_t, which is implementation-defined in C.
 */
static int64_t divls64(int64_t u1, uint64_t u0, int64_t v)
{
	int64_t q, uneg, vneg, diff, borrow;

	uneg = u1 >> 63;          // -1 if u < 0.
	if (uneg) {               // Compute the absolute
		u0 = -u0;         // value of the dividend u.
		borrow = (u0 != 0); // Negating u0 borrows from the
		u1 = -u1 - borrow;  // high word unless u0 was 0.
	}

	vneg = v >> 63;           // -1 if v < 0.
	v = (v ^ vneg) - vneg;    // Absolute value of v.

	if ((uint64_t)u1 >= (uint64_t)v)
		goto overflow;

	q = divlu64(u1, u0, v);

	diff = uneg ^ vneg;       // Negate q if signs of
	q = (q ^ diff) - diff;    // u and v differed.

	if ((diff ^ q) < 0 && q != 0) {    // If overflow, return the largest
	overflow:                          // possible neg. quotient.
		q = 0x8000000000000000ULL;
	}
	return q;
}
315
/* Exact mean of all samples so far; 0 if the tally is empty. */
ssize_t tally_mean(const struct tally *tally)
{
	size_t count = tally_num(tally);
	if (!count)
		return 0;

	/* With a 32-bit size_t the two-word total fits in int64_t, so
	 * plain 64-bit division suffices; otherwise fall back to the
	 * 128-by-64 division helper. */
	if (sizeof(tally->total[0]) == sizeof(uint32_t)) {
		/* Use standard 64-bit arithmetic. */
		int64_t total = tally->total[0]
			| (((uint64_t)tally->total[1]) << 32);
		return total / count;
	}
	return divls64(tally->total[1], tally->total[0], count);
}
330
/*
 * Sum of all samples.  If @overflow is non-NULL, *overflow receives the
 * high word of the 128-bit total and the raw low word is returned.
 * With @overflow NULL, the total is instead clamped to the ssize_t
 * range when it doesn't fit.
 */
ssize_t tally_total(const struct tally *tally, ssize_t *overflow)
{
	if (overflow) {
		*overflow = tally->total[1];
		return tally->total[0];
	}

	/* If result is negative, make sure we can represent it. */
	if (tally->total[1] & ((size_t)1 << (SIZET_BITS-1))) {
		/* Must have only underflowed once, and must be able to
		 * represent result at ssize_t. */
		if ((~tally->total[1])+1 != 0
		    || (ssize_t)tally->total[0] >= 0) {
			/* Underflow, return minimum. */
			return (ssize_t)((size_t)1 << (SIZET_BITS - 1));
		}
	} else {
		/* Result is positive, must not have overflowed, and must be
		 * able to represent as ssize_t. */
		if (tally->total[1] || (ssize_t)tally->total[0] < 0) {
			/* Overflow.  Return maximum. */
			return (ssize_t)~((size_t)1 << (SIZET_BITS - 1));
		}
	}
	return tally->total[0];
}
357
358 static ssize_t bucket_range(const struct tally *tally, unsigned b, size_t *err)
359 {
360         ssize_t min, max;
361
362         min = bucket_min(tally->min, tally->step_bits, b);
363         if (b == tally->buckets - 1)
364                 max = tally->max;
365         else
366                 max = bucket_min(tally->min, tally->step_bits, b+1) - 1;
367
368         /* FIXME: Think harder about cumulative error; is this enough?. */
369         *err = (max - min + 1) / 2;
370         /* Avoid overflow. */
371         return min + (max - min) / 2;
372 }
373
374 ssize_t tally_approx_median(const struct tally *tally, size_t *err)
375 {
376         size_t count = tally_num(tally), total = 0;
377         unsigned int i;
378
379         for (i = 0; i < tally->buckets; i++) {
380                 total += tally->counts[i];
381                 if (total * 2 >= count)
382                         break;
383         }
384         return bucket_range(tally, i, err);
385 }
386
/*
 * Approximate mode (most frequent sample); *err bounds the error.
 * When several buckets tie for the highest count, the midpoint of the
 * tied span is returned and *err is widened to cover it.
 */
ssize_t tally_approx_mode(const struct tally *tally, size_t *err)
{
	unsigned int i, min_best = 0, max_best = 0;

	/* Track the first (min_best) and last (max_best) buckets holding
	 * the highest count seen so far. */
	for (i = 0; i < tally->buckets; i++) {
		if (tally->counts[i] > tally->counts[min_best]) {
			min_best = max_best = i;
		} else if (tally->counts[i] == tally->counts[min_best]) {
			max_best = i;
		}
	}

	/* We can have more than one best, making our error huge. */
	if (min_best != max_best) {
		ssize_t min, max;
		min = bucket_range(tally, min_best, err);
		/* NOTE(review): this overwrites *err with max_best's
		 * half-width before widening below — confirm intended. */
		max = bucket_range(tally, max_best, err);
		max += *err;
		*err += (size_t)(max - min);
		return min + (max - min) / 2;
	}

	return bucket_range(tally, min_best, err);
}
411
412 static unsigned get_max_bucket(const struct tally *tally)
413 {
414         unsigned int i;
415
416         for (i = tally->buckets; i > 0; i--)
417                 if (tally->counts[i-1])
418                         break;
419         return i;
420 }
421
/*
 * Render an ASCII bar graph of the bucket counts, one row per bucket
 * with the minimum value at the bottom.  Returns a malloc'd,
 * NUL-terminated string of newline-separated rows (caller frees), or
 * NULL on allocation failure.
 */
char *tally_histogram(const struct tally *tally,
		      unsigned width, unsigned height)
{
	unsigned int i, count, max_bucket, largest_bucket;
	struct tally *tmp;
	char *graph, *p;

	assert(width >= TALLY_MIN_HISTO_WIDTH);
	assert(height >= TALLY_MIN_HISTO_HEIGHT);

	/* Ignore unused buckets. */
	max_bucket = get_max_bucket(tally);

	/* FIXME: It'd be nice to smooth here... */
	if (height >= max_bucket) {
		height = max_bucket;
		tmp = NULL;
	} else {
		/* We create a temporary then renormalize so < height. */
		/* FIXME: Antialias properly! */
		tmp = tally_new(tally->buckets);
		if (!tmp)
			return NULL;
		tmp->min = tally->min;
		tmp->max = tally->max;
		tmp->step_bits = tally->step_bits;
		memcpy(tmp->counts, tally->counts,
		       sizeof(tally->counts[0]) * tmp->buckets);
		/* Widening max forces renormalize to coarsen the buckets,
		 * shrinking the number of used ones each pass. */
		while ((max_bucket = get_max_bucket(tmp)) >= height)
			renormalize(tmp, tmp->min, tmp->max * 2);
		/* Restore max */
		tmp->max = tally->max;
		tally = tmp;
		height = max_bucket;
	}

	/* Figure out longest line, for scale. */
	/* NOTE(review): largest_bucket is unsigned int but counts are
	 * size_t — could truncate for huge counts; confirm acceptable. */
	largest_bucket = 0;
	for (i = 0; i < tally->buckets; i++) {
		if (tally->counts[i] > largest_bucket)
			largest_bucket = tally->counts[i];
	}

	/* Each row is width chars plus '\n'; plus one final NUL. */
	p = graph = (char *)malloc(height * (width + 1) + 1);
	if (!graph) {
		free(tmp);
		return NULL;
	}

	for (i = 0; i < height; i++) {
		unsigned covered = 1, row;

		/* People expect minimum at the bottom. */
		row = height - i - 1;
		/* Scale bar length to the widest bucket; every row gets
		 * at least one cell. */
		count = (double)tally->counts[row] / largest_bucket * (width-1)+1;

		/* Left margin: numeric labels on the extreme rows, '+'
		 * on the bucket containing zero, '|' elsewhere. */
		if (row == 0)
			covered = snprintf(p, width, "%zi", tally->min);
		else if (row == height - 1)
			covered = snprintf(p, width, "%zi", tally->max);
		else if (row == bucket_of(tally->min, tally->step_bits, 0))
			*p = '+';
		else
			*p = '|';

		if (covered > width)
			covered = width;
		p += covered;

		/* The bar shares the row with the label; shorten it by
		 * whatever the label consumed. */
		if (count > covered)
			count -= covered;
		else
			count = 0;

		memset(p, '*', count);
		p += count;
		*p = '\n';
		p++;
	}
	*p = '\0';
	free(tmp);
	return graph;
}