1 /* LGPL (v2.1 or any later version) - see LICENSE file for details */
2 #include <ccan/timer/timer.h>
3 #include <ccan/array_size/array_size.h>
4 #include <ccan/ilog/ilog.h>
5 #include <ccan/likely/likely.h>
9 #define PER_LEVEL (1ULL << TIMER_LEVEL_BITS)
12 struct list_head list[PER_LEVEL];
/* Convert an absolute time into the wheel's internal unit: "grains" of
 * TIMER_GRANULARITY nanoseconds each.
 * NOTE(review): no braces are visible here — lines appear elided from
 * this extract; body presumed truncated. */
static uint64_t time_to_grains(struct timeabs t)
	/* Seconds scaled to grains-per-second, plus the nanosecond part. */
	return t.ts.tv_sec * ((uint64_t)1000000000 / TIMER_GRANULARITY)
		+ (t.ts.tv_nsec / TIMER_GRANULARITY);
/* Inverse of time_to_grains(): convert a grain count back to a
 * struct timeabs.
 * NOTE(review): the declaration of 't', the final nanosecond scaling
 * (presumably "* TIMER_GRANULARITY") and the return statement are not
 * visible in this extract — body truncated. */
static struct timeabs grains_to_time(uint64_t grains)
	t.ts.tv_sec = grains / (1000000000 / TIMER_GRANULARITY);
	/* Remainder is the sub-second part, still in grains at this point. */
	t.ts.tv_nsec = (grains % (1000000000 / TIMER_GRANULARITY))
/* Initialize a timers structure: empty "far" list, base time set to
 * 'start' (converted to grains), no pending timer (first == -1ULL
 * sentinel), and no wheel levels allocated yet — levels are created
 * lazily (see add_level()). */
void timers_init(struct timers *timers, struct timeabs start)
	list_head_init(&timers->far);
	timers->base = time_to_grains(start);
	/* -1ULL is the "no timer pending" sentinel for ->first. */
	timers->first = -1ULL;
	for (i = 0; i < ARRAY_SIZE(timers->level); i++)
		timers->level[i] = NULL;
/* Which wheel level should hold a timer expiring at absolute grain
 * 'time'?  Farther-away expiries map to higher (coarser) levels; each
 * level covers TIMER_LEVEL_BITS more bits of the time value.  The /2
 * biases the choice so a time lands in a level whose buckets span it. */
static unsigned int level_of(const struct timers *timers, uint64_t time)
	/* Level depends how far away it is. */
	diff = time - timers->base;
	return ilog64(diff / 2) / TIMER_LEVEL_BITS;
/* Place 't' on the list for its expiry time: the bucket indexed by the
 * level's slice of t->time bits when that level exists.
 * NOTE(review): the body of the !timers->level[level] branch is elided
 * in this extract — presumably it selects &timers->far instead; the
 * closing brace structure is also missing.  Confirm against the full
 * file. */
static void timer_add_raw(struct timers *timers, struct timer *t)
	unsigned int level = level_of(timers, t->time);

	if (!timers->level[level])
		/* Bucket index = this level's TIMER_LEVEL_BITS-wide slice of the time. */
		int off = (t->time >> (level*TIMER_LEVEL_BITS)) & (PER_LEVEL-1);
		l = &timers->level[level]->list[off];
	list_add_tail(l, &t->list);
66 void timer_init(struct timer *t)
68 list_node_init(&t->list);
/* Is this node in the "initialized but not linked into any list" state
 * produced by list_node_init()?  Used by timer_add() to assert that the
 * caller ran timer_init() first.  (Body not visible in this extract.) */
static bool list_node_initted(const struct list_node *n)
/* Add an initialized timer to expire at absolute time 'when'.
 * Times already in the past are clamped to the current base (i.e. they
 * fire as soon as possible), and the cached earliest expiry
 * (timers->first) is lowered if this timer beats it. */
void timer_add(struct timers *timers, struct timer *t, struct timeabs when)
	/* Caller must have run timer_init() on this timer. */
	assert(list_node_initted(&t->list));

	t->time = time_to_grains(when);

	/* Added in the past? Treat it as imminent. */
	if (t->time < timers->base)
		t->time = timers->base;
	if (t->time < timers->first)
		timers->first = t->time;

	timer_add_raw(timers, t);
92 void timer_del(struct timers *timers, struct timer *t)
94 list_del_init(&t->list);
/* Move every timer on the 'far' list due at or before 'when' onto
 * 'list' (appended at the tail).
 * NOTE(review): the declaration line for the 'when' parameter and the
 * closing braces are elided in this extract. */
static void timers_far_get(struct timers *timers,
			   struct list_head *list,
	struct timer *i, *next;

	/* _safe variant: we unlink nodes while walking. */
	list_for_each_safe(&timers->far, i, next, list) {
		if (i->time <= when) {
			list_del_from(&timers->far, &i->list);
			list_add_tail(list, &i->list);
/* Lazily allocate wheel 'level': create its bucket array, then pull in
 * every 'far' timer that now falls within the span this level covers and
 * re-add it (each lands in this or a lower level).
 * NOTE(review): the malloc-failure path and some declarations ('i', 't')
 * are elided in this extract — confirm the full file checks l != NULL. */
static void add_level(struct timers *timers, unsigned int level)
	struct timer_level *l;
	struct list_head from_far;

	l = malloc(sizeof(*l));
	for (i = 0; i < ARRAY_SIZE(l->list); i++)
		list_head_init(&l->list[i]);
	timers->level[level] = l;

	list_head_init(&from_far);
	/* Everything expiring before the end of this level's range belongs
	 * in the wheel now rather than on 'far'. */
	timers_far_get(timers, &from_far,
		       timers->base + (1ULL << ((level+1)*TIMER_LEVEL_BITS)) - 1);
	while ((t = list_pop(&from_far, struct timer, list)) != NULL)
		timer_add_raw(timers, t);
/* We don't need to search past the first at level 0, since the
 * bucket range is 1; they're all the same. */
/* Scan 'list' for the earliest-expiring timer; returns it, or 'prev'
 * when nothing on the list beats prev->time.
 * NOTE(review): a middle parameter (callers pass a level, or -1U for the
 * far list) and part of the loop body/return are elided in this extract. */
static const struct timer *find_first(const struct list_head *list,
				      const struct timer *prev)
	list_for_each(list, t, list) {
		if (!prev || t->time < prev->time)
/* Find the earliest pending timer anywhere in the structure, starting
 * the search at the level suggested by the (possibly stale) cached
 * timers->first.  Because buckets above level 0 are not sorted, and the
 * hint may be inaccurate after timer_del()/expiry, this walks buckets —
 * wrapping into the next level and finally the 'far' list as needed.
 * NOTE(review): many lines (brace structure, 'base'/'h' declarations,
 * early-return bodies, final return) are elided in this extract. */
static const struct timer *get_first(const struct timers *timers)
	unsigned int level, i, off;
	const struct timer *found = NULL;

	/* Hint earlier than base can happen transiently; handled here
	 * (branch body elided). */
	if (timers->first < timers->base) {
	/* May not be accurate, due to timer_del / expiry. */
	level = level_of(timers, timers->first);
	base = timers->first >> (TIMER_LEVEL_BITS * level);

	/* Level not allocated: everything that far out lives on 'far'. */
	if (!timers->level[level])
		return find_first(&timers->far, -1U, NULL);
	off = base % PER_LEVEL;
	for (i = 0; i < PER_LEVEL; i++) {
		h = &timers->level[level]->list[(i+off) % PER_LEVEL];
		/* We haven't cascaded yet, so if we wrap, we'll need to
		 * check next level, too. */
		if (i + off == PER_LEVEL)
		if (i == PER_LEVEL) {
			base >>= TIMER_LEVEL_BITS;
		/* We need *next* bucket: we've started reusing the
		/* Level 0 is exact, so they're all the same. */
		found = find_first(h, level, NULL);
		/* No next level allocated: remaining candidates are on 'far'. */
		if (!timers->level[level+1]) {
			found = find_first(&timers->far, -1U, found);
		/* Current upper bucket has emptied into this
		 * bucket; we want *next* one. */
		base >>= TIMER_LEVEL_BITS;
		off = base % PER_LEVEL;
		h = &timers->level[level+1]->list[off];
		found = find_first(h, level+1, found);
/* Recompute the cached earliest-expiry time from scratch.  Resets
 * timers->first to the -1ULL "none pending" sentinel when nothing was
 * found, otherwise stores the found timer's time.
 * NOTE(review): the not-found early path and the return statements are
 * elided in this extract; presumably returns whether a timer exists. */
static bool update_first(struct timers *timers)
	const struct timer *found = get_first(timers);
	timers->first = -1ULL;
	timers->first = found->time;
/* Report the earliest pending expiry through '*first'.
 * NOTE(review): the early-return body (when update_first() finds no
 * pending timer) and the final 'return true' are elided in this extract;
 * on the failure path *first is presumably left untouched — confirm. */
bool timer_earliest(struct timers *timers, struct timeabs *first)
	if (!update_first(timers))
	*first = grains_to_time(timers->first);
/* Assume no timers before 'time', cascade down and update base time. */
/* Advancing 'base' may wrap bucket indices at one or more levels; every
 * timer in a wrapped bucket is collected onto a temporary list and
 * re-added so it cascades to its (now lower) correct level.
 * NOTE(review): several lines are elided in this extract — declarations
 * ('src', 'i', 'need_level'), the base assignment, the first argument of
 * the timers_far_get() call, and brace structure. */
static void timer_fast_forward(struct timers *timers, uint64_t time)
	unsigned int level, changed;
	struct list_head list;

	/* How many bits changed between base and time?
	 * Each time we wrap, we need to empty buckets from above. */
	if (time == timers->base)
	changed = ilog64_nz(time ^ timers->base);
	level = (changed - 1) / TIMER_LEVEL_BITS;

	/* Buckets always empty downwards, so we could cascade manually,
	 * but it's rarely very many so we just remove and re-add */
	list_head_init(&list);
	if (!timers->level[level]) {
		/* We need any which belong on this level. */
		timers_far_get(timers, &list,
			       + (1ULL << ((level+1)*TIMER_LEVEL_BITS))-1);
		/* Get all timers from this bucket. */
		src = (time >> (level * TIMER_LEVEL_BITS)) % PER_LEVEL;
		list_append_list(&list,
				 &timers->level[level]->list[src]);
	/* Did we hit the last level? If so, add. */
	if (need_level != -1)
		add_level(timers, need_level);

	/* Fast-forward the time, and re-add everyone. */
	while ((i = list_pop(&list, struct timer, list)) != NULL)
		timer_add_raw(timers, i);
/* Returns an expired timer. */
/* Pops one timer due at or before 'expire' (converted to grains), or —
 * by implication of the visible control flow — NULL when none is due.
 * The structure is fast-forwarded to the due time before popping, so
 * level-0 bucket 'off' holds exactly the timers expiring "now".
 * NOTE(review): declarations ('t', 'off'), the do { opener, returns and
 * some brace structure are elided in this extract. */
struct timer *timers_expire(struct timers *timers, struct timeabs expire)
	uint64_t now = time_to_grains(expire);

	/* Time must not go backwards. */
	assert(now >= timers->base);

	if (!timers->level[0]) {
		if (list_empty(&timers->far))
		add_level(timers, 0);
	/* Nothing due yet: just advance base to 'now'. */
	if (timers->first > now) {
		timer_fast_forward(timers, now);
	timer_fast_forward(timers, timers->first);
	off = timers->base % PER_LEVEL;

	/* This *may* be NULL, if we deleted the first timer */
	t = list_pop(&timers->level[0]->list[off], struct timer, list);
	list_node_init(&t->list);
	} while (!t && update_first(timers));
/* Consistency check for one bucket list: the underlying list structure
 * must be valid, and every timer's time must lie in [min, max] and be
 * >= 'first' (the cached earliest).  On violation, prints a diagnostic
 * to stderr; with a non-NULL 'abortstr' the pattern (per list_check())
 * is to abort.  NOTE(review): return statements, fprintf stream argument
 * lines and closing braces are elided in this extract. */
static bool timer_list_check(const struct list_head *l,
			     uint64_t min, uint64_t max, uint64_t first,
			     const char *abortstr)
	const struct timer *t;

	if (!list_check(l, abortstr))
	list_for_each(l, t, list) {
		/* Bucket range violation. */
		if (t->time < min || t->time > max) {
			"%s: timer %p %llu not %llu-%llu\n",
			abortstr, t, (long long)t->time,
			(long long)min, (long long)max);
		/* Cached-first violation. */
		if (t->time < first) {
			"%s: timer %p %llu < minimum %llu\n",
			abortstr, t, (long long)t->time,
/* Validate the whole structure: each level-0 bucket holds exactly one
 * grain of time; each higher-level bucket holds its per_bucket span,
 * starting from the *next* bucket (the current one has been cascaded
 * empty); finally the 'far' list holds everything beyond the deepest
 * allocated level.  Returns the (de-consted) input on success.
 * NOTE(review): early returns, 'base'/'h' declarations and brace
 * structure are elided in this extract.  Also NOTE(review): the first
 * loop indexes timers->level[l] before any visible assignment to 'l' —
 * level 0 is presumably intended here; confirm against the full file. */
struct timers *timers_check(const struct timers *timers, const char *abortstr)
	unsigned int l, i, off;

	if (!timers->level[0])
	/* First level is simple. */
	off = timers->base % PER_LEVEL;
	for (i = 0; i < PER_LEVEL; i++) {
		h = &timers->level[l]->list[(i+off) % PER_LEVEL];
		/* min == max: a level-0 bucket covers exactly one grain. */
		if (!timer_list_check(h, timers->base + i, timers->base + i,
				      timers->first, abortstr))
	/* For other levels, "current" bucket has been emptied, and may contain
	 * entries for the current + level_size bucket. */
	for (l = 1; l < ARRAY_SIZE(timers->level) && timers->level[l]; l++) {
		uint64_t per_bucket = 1ULL << (TIMER_LEVEL_BITS * l);
		off = ((timers->base >> (l*TIMER_LEVEL_BITS)) % PER_LEVEL);
		/* We start at *next* bucket. */
		base = (timers->base & ~(per_bucket - 1)) + per_bucket;
		for (i = 1; i <= PER_LEVEL; i++) {
			h = &timers->level[l]->list[(i+off) % PER_LEVEL];
			if (!timer_list_check(h, base, base + per_bucket - 1,
					      timers->first, abortstr))
	/* 'far' holds everything past the end of the deepest level. */
	base = (timers->base & ~((1ULL << (TIMER_LEVEL_BITS * l)) - 1))
		+ (1ULL << (TIMER_LEVEL_BITS * l)) - 1;
	if (!timer_list_check(&timers->far, base, -1ULL, timers->first,
	return (struct timers *)timers;
406 #ifdef CCAN_TIMER_DEBUG
/* Debug helper (CCAN_TIMER_DEBUG only): print summary stats — count and
 * min/max expiry — for the timers on one bucket list.
 * NOTE(review): most of the body (accumulation of min/max/num, the 't'
 * declaration, empty-bucket handling) is elided in this extract. */
static void dump_bucket_stats(FILE *fp, const struct list_head *h)
	unsigned long long min, max, num;
	list_for_each(h, t, list) {
	fprintf(fp, " %llu (%llu-%llu)\n",
/* Debug helper (CCAN_TIMER_DEBUG only): dump the whole structure to
 * 'fp' — base time, per-bucket stats for each allocated level (mirroring
 * the traversal order of timers_check()), then the 'far' list.
 * NOTE(review): brace structure and several lines are elided in this
 * extract.  Also NOTE(review): the "%lu" used for 'timers->base + i'
 * (a uint64_t expression) mismatches on LP32/LLP64 targets — the full
 * file presumably casts or uses %llu; confirm. */
void timers_dump(const struct timers *timers, FILE *fp)
	unsigned int l, i, off;
	unsigned long long base;

	fprintf(fp, "Base: %llu\n", (unsigned long long)timers->base);

	if (!timers->level[0])
	fprintf(fp, "Level 0:\n");

	/* First level is simple. */
	off = timers->base % PER_LEVEL;
	for (i = 0; i < PER_LEVEL; i++) {
		const struct list_head *h;

		fprintf(fp, "  Bucket %llu (%lu):",
			(i+off) % PER_LEVEL, timers->base + i);
		h = &timers->level[0]->list[(i+off) % PER_LEVEL];
		dump_bucket_stats(fp, h);
	/* For other levels, "current" bucket has been emptied, and may contain
	 * entries for the current + level_size bucket. */
	for (l = 1; l < ARRAY_SIZE(timers->level) && timers->level[l]; l++) {
		uint64_t per_bucket = 1ULL << (TIMER_LEVEL_BITS * l);
		off = ((timers->base >> (l*TIMER_LEVEL_BITS)) % PER_LEVEL);
		/* We start at *next* bucket. */
		base = (timers->base & ~(per_bucket - 1)) + per_bucket;

		fprintf(fp, "Level %u:\n", l);
		for (i = 1; i <= PER_LEVEL; i++) {
			const struct list_head *h;

			fprintf(fp, "  Bucket %llu (%llu - %llu):",
				base, base + per_bucket - 1);
			h = &timers->level[l]->list[(i+off) % PER_LEVEL];
			dump_bucket_stats(fp, h);
	if (!list_empty(&timers->far)) {
		fprintf(fp, "Far timers:");
		dump_bucket_stats(fp, &timers->far);
488 void timers_cleanup(struct timers *timers)
492 for (l = 0; l < ARRAY_SIZE(timers->level); l++)
493 free(timers->level[l]);