1 /* LGPL (v2.1 or any later version) - see LICENSE file for details */
#include <ccan/timer/timer.h>
#include <ccan/array_size/array_size.h>
#include <ccan/ilog/ilog.h>
#include <ccan/likely/likely.h>
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
/* Buckets per wheel level: 2^TIMER_LEVEL_BITS. */
#define PER_LEVEL (1ULL << TIMER_LEVEL_BITS)

/* One wheel level: an array of PER_LEVEL bucket lists.
 * NOTE(review): the enclosing "struct timer_level { ... };" lines fall
 * outside this view — confirm against the full source. */
	struct list_head list[PER_LEVEL];
15 static uint64_t time_to_grains(struct timeabs t)
17 return t.ts.tv_sec * ((uint64_t)1000000000 / TIMER_GRANULARITY)
18 + (t.ts.tv_nsec / TIMER_GRANULARITY);
21 static struct timeabs grains_to_time(uint64_t grains)
25 t.ts.tv_sec = grains / (1000000000 / TIMER_GRANULARITY);
26 t.ts.tv_nsec = (grains % (1000000000 / TIMER_GRANULARITY))
31 void timers_init(struct timers *timers, struct timeabs start)
35 list_head_init(&timers->far);
36 timers->base = time_to_grains(start);
37 timers->first = -1ULL;
38 for (i = 0; i < ARRAY_SIZE(timers->level); i++)
39 timers->level[i] = NULL;
42 static unsigned int level_of(const struct timers *timers, uint64_t time)
46 /* Level depends how far away it is. */
47 diff = time - timers->base;
48 return ilog64(diff / 2) / TIMER_LEVEL_BITS;
51 static void timer_add_raw(struct timers *timers, struct timer *t)
54 unsigned int level = level_of(timers, t->time);
56 if (!timers->level[level])
59 int off = (t->time >> (level*TIMER_LEVEL_BITS)) & (PER_LEVEL-1);
60 l = &timers->level[level]->list[off];
63 list_add_tail(l, &t->list);
66 void timer_init(struct timer *t)
68 list_node_init(&t->list);
/* True iff 'n' is in the state left by list_node_init() (i.e. the
 * timer is not currently queued anywhere).
 * NOTE(review): the function body lies outside this view — presumably
 * it checks the node's links point back at itself; confirm against the
 * full source. */
static bool list_node_initted(const struct list_node *n)
76 void timer_add(struct timers *timers, struct timer *t, struct timeabs when)
78 assert(list_node_initted(&t->list));
80 t->time = time_to_grains(when);
82 /* Added in the past? Treat it as imminent. */
83 if (t->time < timers->base)
84 t->time = timers->base;
85 if (t->time < timers->first)
86 timers->first = t->time;
88 timer_add_raw(timers, t);
92 void timer_del(struct timers *timers, struct timer *t)
94 list_del_init(&t->list);
97 static void timers_far_get(struct timers *timers,
98 struct list_head *list,
101 struct timer *i, *next;
103 list_for_each_safe(&timers->far, i, next, list) {
104 if (i->time <= when) {
105 list_del_from(&timers->far, &i->list);
106 list_add_tail(list, &i->list);
111 static void add_level(struct timers *timers, unsigned int level)
113 struct timer_level *l;
116 struct list_head from_far;
118 l = malloc(sizeof(*l));
122 for (i = 0; i < ARRAY_SIZE(l->list); i++)
123 list_head_init(&l->list[i]);
124 timers->level[level] = l;
126 list_head_init(&from_far);
127 timers_far_get(timers, &from_far,
128 timers->base + (1ULL << ((level+1)*TIMER_LEVEL_BITS)) - 1);
130 while ((t = list_pop(&from_far, struct timer, list)) != NULL)
131 timer_add_raw(timers, t);
134 static const struct timer *find_first(const struct list_head *list,
135 const struct timer *prev)
139 list_for_each(list, t, list) {
140 if (!prev || t->time < prev->time)
/* Return the pending timer with the smallest expiry, or NULL if none.
 * Scans the level holding timers->first, climbing to coarser levels
 * when a whole level is empty, and falling back to the far list.
 * NOTE(review): several structural lines of this function (braces,
 * else-arms, loop exits) are elided from this view; the comments below
 * annotate only the visible statements. */
static const struct timer *get_first(const struct timers *timers)
	unsigned int level, i, off;
	const struct timer *found = NULL;

	/* 'first' already behind base: earliest candidate must be in the
	 * current buckets, so scanning starts from the base. */
	if (timers->first < timers->base) {

		/* May not be accurate, due to timer_del / expiry. */
		level = level_of(timers, timers->first);
		base = timers->first >> (TIMER_LEVEL_BITS * level);

	/* Level not allocated: every candidate lives on the far list. */
	if (!timers->level[level])
		return find_first(&timers->far, NULL);

	/* Walk this level's buckets, starting at 'first''s bucket. */
	off = base % PER_LEVEL;
	for (i = 0; i < PER_LEVEL; i++) {
		h = &timers->level[level]->list[(i+off) % PER_LEVEL];

		/* We haven't cascaded yet, so if we wrap, we'll need to
		 * check next level, too. */
		if (i + off == PER_LEVEL)

	/* Whole level empty: climb to the next (coarser) level. */
	if (i == PER_LEVEL) {
		base >>= TIMER_LEVEL_BITS;

		/* We need *next* bucket: we've started reusing the
		 * (continuation of this comment elided from view) */

	/* Level 0 is exact, so they're all the same. */
		found = list_top(h, struct timer, list);
		found = find_first(h, NULL);

	/* Wrapped during the scan: the next level (or the far list, if
	 * that level is unallocated) may hold an earlier timer. */
	if (!timers->level[level+1]) {
		found = find_first(&timers->far, found);
		/* Current upper bucket has emptied into this
		 * bucket; we want *next* one. */
		base >>= TIMER_LEVEL_BITS;
		off = base % PER_LEVEL;
		h = &timers->level[level+1]->list[off];
		found = find_first(h, found);
218 static bool update_first(struct timers *timers)
220 const struct timer *found = get_first(timers);
223 timers->first = -1ULL;
227 timers->first = found->time;
231 bool timer_earliest(struct timers *timers, struct timeabs *first)
233 if (!update_first(timers))
236 *first = grains_to_time(timers->first);
/* Assume no timers before 'time', cascade down and update base time. */
/* NOTE(review): this view is missing several lines of this function —
 * declarations (e.g. need_level, src, the list iterator), the
 * do/while cascade loop, and the base assignment; comments below
 * annotate only the visible statements. */
static void timer_fast_forward(struct timers *timers, uint64_t time)
	unsigned int level, changed;
	struct list_head list;

	/* How many bits changed between base and time?
	 * Each time we wrap, we need to empty buckets from above. */
	if (time == timers->base)

	changed = ilog64_nz(time ^ timers->base);
	level = (changed - 1) / TIMER_LEVEL_BITS;

	/* Buckets always empty downwards, so we could cascade manually,
	 * but it's rarely very many so we just remove and re-add */
	list_head_init(&list);

	if (!timers->level[level]) {
		/* We need any which belong on this level. */
		timers_far_get(timers, &list,
			       /* upper bound: end of this level's span */
			       + (1ULL << ((level+1)*TIMER_LEVEL_BITS))-1);

		/* Get all timers from this bucket. */
		src = (time >> (level * TIMER_LEVEL_BITS)) % PER_LEVEL;
		list_append_list(&list,
				 &timers->level[level]->list[src]);

	/* Did we hit the last level?  If so, add. */
	if (need_level != -1)
		add_level(timers, need_level);

	/* Fast-forward the time, and re-add everyone. */
	while ((i = list_pop(&list, struct timer, list)) != NULL)
		timer_add_raw(timers, i);
287 /* Returns an expired timer. */
288 struct timer *timers_expire(struct timers *timers, struct timeabs expire)
290 uint64_t now = time_to_grains(expire);
294 assert(now >= timers->base);
296 if (!timers->level[0]) {
297 if (list_empty(&timers->far))
299 add_level(timers, 0);
303 if (timers->first > now) {
304 timer_fast_forward(timers, now);
308 timer_fast_forward(timers, timers->first);
309 off = timers->base % PER_LEVEL;
311 /* This *may* be NULL, if we deleted the first timer */
312 t = list_pop(&timers->level[0]->list[off], struct timer, list);
314 list_node_init(&t->list);
315 } while (!t && update_first(timers));
/* Verify one bucket list: the list structure must be sound and every
 * timer's time must lie within [min, max] and be >= 'first'.  Returns
 * false on failure; with a non-NULL 'abortstr' it reports and aborts.
 * NOTE(review): the fprintf(stderr, ...) wrappers, abort() calls and
 * early returns are partially elided from this view. */
static bool timer_list_check(const struct list_head *l,
			     uint64_t min, uint64_t max, uint64_t first,
			     const char *abortstr)
	const struct timer *t;

	/* Structural (linkage) check of the list itself. */
	if (!list_check(l, abortstr))

	list_for_each(l, t, list) {
		if (t->time < min || t->time > max) {
				"%s: timer %p %llu not %llu-%llu\n",
				abortstr, t, (long long)t->time,
				(long long)min, (long long)max);
		if (t->time < first) {
				"%s: timer %p %llu < minimum %llu\n",
				abortstr, t, (long long)t->time,
/* Consistency-check the whole wheel; returns 'timers' on success, NULL
 * on failure (aborting with a message when 'abortstr' is non-NULL).
 * NOTE(review): some declarations, braces, labels and the NULL returns
 * are elided from this view; comments annotate visible lines only. */
struct timers *timers_check(const struct timers *timers, const char *abortstr)
	unsigned int l, i, off;

	/* No level 0: only the far list needs checking (below). */
	if (!timers->level[0])

	/* First level is simple. */
	off = timers->base % PER_LEVEL;
	for (i = 0; i < PER_LEVEL; i++) {
		/* Level-0 buckets each hold exactly one grain value. */
		h = &timers->level[l]->list[(i+off) % PER_LEVEL];
		if (!timer_list_check(h, timers->base + i, timers->base + i,
				      timers->first, abortstr))

	/* For other levels, "current" bucket has been emptied, and may contain
	 * entries for the current + level_size bucket. */
	for (l = 1; l < ARRAY_SIZE(timers->level) && timers->level[l]; l++) {
		uint64_t per_bucket = 1ULL << (TIMER_LEVEL_BITS * l);

		off = ((timers->base >> (l*TIMER_LEVEL_BITS)) % PER_LEVEL);
		/* We start at *next* bucket. */
		base = (timers->base & ~(per_bucket - 1)) + per_bucket;

		for (i = 1; i <= PER_LEVEL; i++) {
			h = &timers->level[l]->list[(i+off) % PER_LEVEL];
			if (!timer_list_check(h, base, base + per_bucket - 1,
					      timers->first, abortstr))

	/* Far-list timers must lie beyond every allocated level's span. */
	base = (timers->base & ~((1ULL << (TIMER_LEVEL_BITS * l)) - 1))
		+ (1ULL << (TIMER_LEVEL_BITS * l)) - 1;
	if (!timer_list_check(&timers->far, base, -1ULL, timers->first,

	return (struct timers *)timers;
404 #ifdef CCAN_TIMER_DEBUG
/* Print " <count> (<min>-<max>)\n" stats for one bucket list.
 * NOTE(review): the empty-list early-out, the iterator declaration and
 * the min/max/num accumulation inside the loop are elided from this
 * view. */
static void dump_bucket_stats(FILE *fp, const struct list_head *h)
	unsigned long long min, max, num;

	list_for_each(h, t, list) {

	fprintf(fp, " %llu (%llu-%llu)\n",
429 void timers_dump(const struct timers *timers, FILE *fp)
431 unsigned int l, i, off;
432 unsigned long long base;
437 fprintf(fp, "Base: %llu\n", (unsigned long long)timers->base);
439 if (!timers->level[0])
442 fprintf(fp, "Level 0:\n");
444 /* First level is simple. */
445 off = timers->base % PER_LEVEL;
446 for (i = 0; i < PER_LEVEL; i++) {
447 const struct list_head *h;
449 fprintf(fp, " Bucket %llu (%lu):",
450 (i+off) % PER_LEVEL, timers->base + i);
451 h = &timers->level[0]->list[(i+off) % PER_LEVEL];
452 dump_bucket_stats(fp, h);
455 /* For other levels, "current" bucket has been emptied, and may contain
456 * entries for the current + level_size bucket. */
457 for (l = 1; l < ARRAY_SIZE(timers->level) && timers->level[l]; l++) {
458 uint64_t per_bucket = 1ULL << (TIMER_LEVEL_BITS * l);
460 off = ((timers->base >> (l*TIMER_LEVEL_BITS)) % PER_LEVEL);
461 /* We start at *next* bucket. */
462 base = (timers->base & ~(per_bucket - 1)) + per_bucket;
464 fprintf(fp, "Level %u:\n", l);
465 for (i = 1; i <= PER_LEVEL; i++) {
466 const struct list_head *h;
468 fprintf(fp, " Bucket %llu (%llu - %llu):",
470 base, base + per_bucket - 1);
472 h = &timers->level[l]->list[(i+off) % PER_LEVEL];
473 dump_bucket_stats(fp, h);
479 if (!list_empty(&timers->far)) {
480 fprintf(fp, "Far timers:");
481 dump_bucket_stats(fp, &timers->far);
486 void timers_cleanup(struct timers *timers)
490 for (l = 0; l < ARRAY_SIZE(timers->level); l++)
491 free(timers->level[l]);