1 /* LGPL (v2.1 or any later version) - see LICENSE file for details */
2 #include <ccan/timer/timer.h>
3 #include <ccan/array_size/array_size.h>
4 #include <ccan/ilog/ilog.h>
/* Number of buckets per wheel level (one level spans PER_LEVEL buckets). */
8 #define PER_LEVEL (1ULL << TIMER_LEVEL_BITS)

/* One wheel level: an array of PER_LEVEL bucket lists of pending timers.
 * NOTE(review): the enclosing struct declaration (presumably
 * "struct timer_level") is elided from this chunk — confirm. */
11 struct list_head list[PER_LEVEL];
/* Default allocator hook for level structures.
 * NOTE(review): body elided from this chunk — presumably a plain
 * malloc(len) wrapper that ignores 'timers'; confirm in full source. */
14 static void *timer_default_alloc(struct timers *timers, size_t len)
/* Default free hook, pairing timer_default_alloc().
 * NOTE(review): body elided — presumably free(p), ignoring 'timers'. */
19 static void timer_default_free(struct timers *timers, void *p)
/* Module-wide allocation hooks used for struct timer_level allocations
 * (see add_level()/timers_cleanup()); overridable via timers_set_allocator(). */
24 static void *(*timer_alloc)(struct timers *, size_t) = timer_default_alloc;
25 static void (*timer_free)(struct timers *, void *) = timer_default_free;
/* Install custom alloc/free hooks for internal allocations.
 * A NULL argument restores the corresponding built-in default.
 * NOTE(review): the NULL checks and the assignments to timer_alloc /
 * timer_free are on lines elided from this chunk. */
27 void timers_set_allocator(void *(*alloc)(struct timers *, size_t len),
28 void (*free)(struct timers *, void *p))
31 alloc = timer_default_alloc;
33 free = timer_default_free;
/* Convert an absolute monotonic time into the internal fixed-point unit
 * ("grains", TIMER_GRANULARITY nanoseconds each). Seconds are scaled by
 * grains-per-second, then the nanosecond remainder is added. */
38 static uint64_t time_to_grains(struct timemono t)
40 return t.ts.tv_sec * ((uint64_t)1000000000 / TIMER_GRANULARITY)
41 + (t.ts.tv_nsec / TIMER_GRANULARITY);
/* Inverse of time_to_grains(): rebuild a struct timemono from grains.
 * NOTE(review): the multiplication scaling the sub-second remainder back
 * to nanoseconds (and the return) are on elided lines. */
44 static struct timemono grains_to_time(uint64_t grains)
48 t.ts.tv_sec = grains / (1000000000 / TIMER_GRANULARITY);
49 t.ts.tv_nsec = (grains % (1000000000 / TIMER_GRANULARITY))
/* Initialize a timers wheel anchored at monotonic time 'start'.
 * Levels are allocated lazily (see add_level()), so all level pointers
 * start NULL; the far list holds timers beyond any allocated level. */
54 void timers_init(struct timers *timers, struct timemono start)
58 list_head_init(&timers->far);
59 timers->base = time_to_grains(start);
/* -1ULL is the "no timer known" sentinel for first/firsts watermarks. */
60 timers->first = -1ULL;
61 memset(timers->firsts, 0xFF, sizeof(timers->firsts));
62 for (i = 0; i < ARRAY_SIZE(timers->level); i++)
63 timers->level[i] = NULL;
/* Which wheel level a timer at absolute grain 'time' belongs on:
 * roughly log2(distance from base) / TIMER_LEVEL_BITS. */
66 static unsigned int level_of(const struct timers *timers, uint64_t time)
70 /* Level depends how far away it is. */
71 diff = time - timers->base;
/* The /2 biases times right at a power-of-two boundary toward the
 * lower (finer) level — NOTE(review): confirm intent vs. full source. */
72 return ilog64(diff / 2) / TIMER_LEVEL_BITS;
/* Insert 't' into the bucket matching its (already-set) t->time.
 * Does NOT update timers->first — callers maintain that watermark. */
75 static void timer_add_raw(struct timers *timers, struct timer *t)
78 unsigned int level = level_of(timers, t->time);
/* Level not allocated: timer goes to the catch-all list instead.
 * NOTE(review): the assignment of 'l' to &timers->far is elided here. */
81 if (!timers->level[level]) {
/* firsts[ARRAY_SIZE(level)] is the extra slot tracking the far list. */
83 first = &timers->firsts[ARRAY_SIZE(timers->level)];
/* Bucket index = the level's TIMER_LEVEL_BITS-wide slice of t->time. */
85 int off = (t->time >> (level*TIMER_LEVEL_BITS)) & (PER_LEVEL-1);
86 l = &timers->level[level]->list[off];
87 first = &timers->firsts[level];
90 list_add_tail(l, &t->list);
/* Prepare a timer for use: mark its list node as unlinked so
 * timer_addrel()/timer_addmono() asserts can detect double-adds. */
95 void timer_init(struct timer *t)
97 list_node_init(&t->list);
/* True if the node is in its freshly-initialized (unlinked) state.
 * NOTE(review): body elided — presumably checks the self-pointing
 * next/prev produced by list_node_init(); confirm in full source. */
100 static inline bool list_node_initted(const struct list_node *n)
/* Arm 't' to fire 'rel' after the current monotonic time.
 * The timer must be initialized (timer_init) and not already armed. */
105 void timer_addrel(struct timers *timers, struct timer *t, struct timerel rel)
107 assert(list_node_initted(&t->list));
109 t->time = time_to_grains(timemono_add(time_mono(), rel));
/* With a true monotonic clock, now + rel can never precede base;
 * NOTE(review): the #else branch clamping instead is partly elided. */
111 #if TIME_HAVE_MONOTONIC
112 assert(t->time >= timers->base);
114 /* Added in the past? Treat it as imminent. */
115 if (t->time < timers->base)
116 t->time = timers->base;
/* Maintain the cached earliest-expiry watermark. */
118 if (t->time < timers->first)
119 timers->first = t->time;
121 timer_add_raw(timers, t);
/* Arm 't' to fire at absolute monotonic time 'when'.
 * The timer must be initialized (timer_init) and not already armed. */
124 void timer_addmono(struct timers *timers, struct timer *t, struct timemono when)
126 assert(list_node_initted(&t->list));
128 t->time = time_to_grains(when);
130 /* Added in the past? Treat it as imminent. */
131 if (t->time < timers->base)
132 t->time = timers->base;
/* Maintain the cached earliest-expiry watermark. */
133 if (t->time < timers->first)
134 timers->first = t->time;
136 timer_add_raw(timers, t);
/* Disarm a timer. The doubly-linked node unlinks without needing to
 * locate its bucket, hence 'timers' is UNNEEDED; re-initializing the
 * node keeps it safe to re-add later. */
140 void timer_del(struct timers *timers UNNEEDED, struct timer *t)
142 list_del_init(&t->list);
/* Move every far-list timer due at or before 'when' onto 'list'.
 * Used when allocating a level (add_level) or cascading
 * (timer_fast_forward) to reclassify far timers into buckets. */
145 static void timers_far_get(struct timers *timers,
146 struct list_head *list,
149 struct timer *i, *next;
/* _safe variant: we unlink nodes while iterating. */
151 list_for_each_safe(&timers->far, i, next, list) {
152 if (i->time <= when) {
153 list_del_from(&timers->far, &i->list);
154 list_add_tail(list, &i->list);
/* Lazily allocate wheel level 'level', then pull in any far-list timers
 * that now fit within this level's range and re-bucket them.
 * NOTE(review): handling of timer_alloc() returning NULL is on elided
 * lines — presumably an early return; confirm in full source. */
159 static void add_level(struct timers *timers, unsigned int level)
161 struct timer_level *l;
164 struct list_head from_far;
166 l = timer_alloc(timers, sizeof(*l));
170 for (i = 0; i < ARRAY_SIZE(l->list); i++)
171 list_head_init(&l->list[i]);
172 timers->level[level] = l;
174 list_head_init(&from_far);
/* Everything within this level's span (base .. base + level range - 1)
 * no longer belongs on the far list. */
175 timers_far_get(timers, &from_far,
176 timers->base + (1ULL << ((level+1)*TIMER_LEVEL_BITS)) - 1);
178 while ((t = list_pop(&from_far, struct timer, list)) != NULL)
179 timer_add_raw(timers, t);
182 /* We don't need to search past the first at level 0, since the
183  * bucket range is 1; they're all the same. */
/* Scan one bucket list for the earliest timer, seeded with 'prev'
 * (the best candidate so far, or NULL). Returns the new best.
 * NOTE(review): a middle parameter (apparently the level) and the
 * early-exit/return lines are elided from this chunk. */
184 static const struct timer *find_first(const struct list_head *list,
186 const struct timer *prev)
190 list_for_each(list, t, list) {
191 if (!prev || t->time < prev->time)
199 /* Update level's first watermark, and return overall first. */
200 static const struct timer *first_for_level(struct timers *timers,
202 const struct timer *level_first,
203 const struct timer *first)
/* Level has a candidate: record its expiry as the level watermark. */
206 timers->firsts[level] = level_first->time;
207 if (!first || level_first->time < first->time)
/* Level is empty: reset its watermark to the "none" sentinel. */
210 timers->firsts[level] = -1ULL;
/* Could 'level' possibly hold a timer earlier than 'first'?
 * Uses the firsts[] watermark to skip levels that cannot improve on
 * the best candidate found so far. */
215 static bool level_may_beat(const struct timers *timers, size_t level,
216 const struct timer *first)
218 return !first || timers->firsts[level] < first->time;
221 /* FIXME: Suboptimal */
/* Exhaustively scan every allocated level (and the far list) for the
 * earliest timer, refreshing each level's firsts[] watermark along the
 * way. Watermarks let us skip levels that provably can't win. */
222 static const struct timer *brute_force_first(struct timers *timers)
225 const struct timer *found = NULL;
227 for (l = 0; l < ARRAY_SIZE(timers->level) && timers->level[l]; l++) {
228 const struct timer *t = NULL;
230 /* Do we know they don't have a better one? */
231 if (!level_may_beat(timers, l, found))
234 /* Find first timer on this level. */
235 for (i = 0; i < PER_LEVEL; i++)
236 t = find_first(&timers->level[l]->list[i], l, t);
238 found = first_for_level(timers, l, t, found);
241 /* Check (and update) far list if there's a chance. */
/* The far list uses the extra watermark slot past the last level. */
242 l = ARRAY_SIZE(timers->level);
243 if (level_may_beat(timers, l, found)) {
244 const struct timer *t = find_first(&timers->far, l, NULL);
245 found = first_for_level(timers, l, t, found);
/* Find the earliest pending timer, or NULL if none.
 * Fast path: level-0 buckets at/after the base offset are normalized
 * (each holds exactly one grain), so the first non-empty one wins. */
251 static const struct timer *get_first(struct timers *timers)
253 /* We can have just far timers, for example. */
254 if (timers->level[0]) {
255 /* First search rest of lower buckets; we've already spilled
256 * so if we find one there we don't need to search further. */
257 unsigned int i, off = timers->base % PER_LEVEL;
259 for (i = off; i < PER_LEVEL; i++) {
260 struct list_head *h = &timers->level[0]->list[i];
/* NOTE(review): the emptiness check guarding this return is on
 * an elided line — presumably "if (!list_empty(h))". */
262 return find_first(h, 0, NULL);
266 /* From here on, we're searching non-normalized parts of the
267 * data structure, which is much subtler.
269 * So we brute force. */
270 return brute_force_first(timers);
/* Recompute the cached timers->first watermark from scratch.
 * NOTE(review): return statements elided — presumably returns false
 * when no timer is pending, true otherwise; confirm in full source. */
273 static bool update_first(struct timers *timers)
275 const struct timer *found = get_first(timers);
/* No pending timers: reset watermark to the "none" sentinel. */
278 timers->first = -1ULL;
282 timers->first = found->time;
/* Report the earliest pending expiry time via *first.
 * Returns false (leaving *first untouched) if no timers are pending. */
286 bool timer_earliest(struct timers *timers, struct timemono *first)
288 if (!update_first(timers))
291 *first = grains_to_time(timers->first);
295 /* Assume no timers before 'time', cascade down and update base time. */
296 static void timer_fast_forward(struct timers *timers, uint64_t time)
298 unsigned int level, changed;
300 struct list_head list;
303 /* How many bits changed between base and time?
304 * Each time we wrap, we need to empty buckets from above. */
305 if (time == timers->base)
/* Highest changed bit determines the deepest level that must spill. */
308 changed = ilog64_nz(time ^ timers->base);
309 level = (changed - 1) / TIMER_LEVEL_BITS;
311 /* Buckets always empty downwards, so we could cascade manually,
312 * but it's rarely very many so we just remove and re-add */
313 list_head_init(&list);
/* Level not allocated: pull matching timers off the far list instead.
 * NOTE(review): the loop header iterating levels, and the 'need_level'
 * bookkeeping, are on elided lines. */
316 if (!timers->level[level]) {
317 /* We need any which belong on this level. */
318 timers_far_get(timers, &list,
320 + (1ULL << ((level+1)*TIMER_LEVEL_BITS))-1);
325 /* Get all timers from this bucket. */
326 src = (time >> (level * TIMER_LEVEL_BITS)) % PER_LEVEL;
327 list_append_list(&list,
328 &timers->level[level]->list[src]);
332 /* Did we hit the last level? If so, add. */
333 if (need_level != -1)
334 add_level(timers, need_level);
336 /* Fast-forward the time, and re-add everyone. */
/* NOTE(review): the "timers->base = time;" assignment is on an elided
 * line before this loop — re-adding relies on the new base. */
338 while ((i = list_pop(&list, struct timer, list)) != NULL)
339 timer_add_raw(timers, i);
342 /* Returns an expired timer. */
/* Pop one timer due at or before 'expire' (monotonic), or NULL if none.
 * Callers loop over this to drain all expired timers; each call
 * fast-forwards the wheel base toward 'now'. */
343 struct timer *timers_expire(struct timers *timers, struct timemono expire)
345 uint64_t now = time_to_grains(expire);
/* Time must never run backwards relative to the wheel base. */
349 assert(now >= timers->base);
351 if (!timers->level[0]) {
352 if (list_empty(&timers->far))
/* Far timers exist but no level 0: materialize it first. */
354 add_level(timers, 0);
/* Earliest pending timer is still in the future: just advance base. */
358 if (timers->first > now) {
359 timer_fast_forward(timers, now);
/* Jump straight to the earliest expiry and pop from its bucket. */
363 timer_fast_forward(timers, timers->first);
364 off = timers->base % PER_LEVEL;
366 /* This *may* be NULL, if we deleted the first timer */
367 t = list_pop(&timers->level[0]->list[off], struct timer, list);
/* Re-init so the caller may timer_addrel()/addmono() it again. */
369 list_node_init(&t->list);
370 } while (!t && update_first(timers));
/* Consistency check for one bucket list: every timer's expiry must lie
 * in [min, max] and not precede the level's 'first' watermark. On
 * violation, prints to stderr and (if abortstr) aborts; returns false.
 * NOTE(review): the fprintf/abort/return plumbing is partly elided. */
375 static bool timer_list_check(const struct list_head *l,
376 uint64_t min, uint64_t max, uint64_t first,
377 const char *abortstr)
379 const struct timer *t;
/* First verify raw list linkage. */
381 if (!list_check(l, abortstr))
384 list_for_each(l, t, list) {
385 if (t->time < min || t->time > max) {
388 "%s: timer %p %llu not %llu-%llu\n",
389 abortstr, t, (long long)t->time,
390 (long long)min, (long long)max);
/* Watermark must be a lower bound for every timer on the level. */
395 if (t->time < first) {
398 "%s: timer %p %llu < minimum %llu\n",
399 abortstr, t, (long long)t->time,
/* Validate the whole wheel's invariants (bucket ranges and watermarks).
 * Returns 'timers' on success; on failure prints (and aborts if
 * abortstr is non-NULL). Usable inside assert(timers_check(...)). */
409 struct timers *timers_check(const struct timers *timers, const char *abortstr)
411 unsigned int l, i, off;
415 if (!timers->level[0])
418 /* First level is simple. */
419 off = timers->base % PER_LEVEL;
420 for (i = 0; i < PER_LEVEL; i++) {
/* Level 0 buckets span exactly one grain: min == max == base + i.
 * NOTE(review): 'l' is used as the level index here; its
 * initialization for this loop is on an elided line — confirm. */
423 h = &timers->level[l]->list[(i+off) % PER_LEVEL];
424 if (!timer_list_check(h, timers->base + i, timers->base + i,
425 timers->firsts[l], abortstr))
429 /* For other levels, "current" bucket has been emptied, and may contain
430 * entries for the current + level_size bucket. */
431 for (l = 1; l < ARRAY_SIZE(timers->level) && timers->level[l]; l++) {
432 uint64_t per_bucket = 1ULL << (TIMER_LEVEL_BITS * l);
434 off = ((timers->base >> (l*TIMER_LEVEL_BITS)) % PER_LEVEL);
435 /* We start at *next* bucket. */
436 base = (timers->base & ~(per_bucket - 1)) + per_bucket;
438 for (i = 1; i <= PER_LEVEL; i++) {
441 h = &timers->level[l]->list[(i+off) % PER_LEVEL];
442 if (!timer_list_check(h, base, base + per_bucket - 1,
443 timers->firsts[l], abortstr))
/* Far list: anything past the end of the deepest allocated level. */
450 base = (timers->base & ~((1ULL << (TIMER_LEVEL_BITS * l)) - 1))
451 + (1ULL << (TIMER_LEVEL_BITS * l)) - 1;
452 if (!timer_list_check(&timers->far, base, -1ULL,
453 timers->firsts[ARRAY_SIZE(timers->level)],
/* Cast away const only to return the argument for assert() chaining. */
457 return (struct timers *)timers;
460 #ifdef CCAN_TIMER_DEBUG
/* Print summary stats (count and min-max expiry range) for one bucket.
 * NOTE(review): initialization of min/max/num and the empty-bucket
 * early-out are on elided lines. */
461 static void dump_bucket_stats(FILE *fp, const struct list_head *h)
463 unsigned long long min, max, num;
474 list_for_each(h, t, list) {
481 fprintf(fp, " %llu (%llu-%llu)\n",
/* Debug dump of the whole wheel (base, every level's buckets, far list)
 * to 'fp'. Mirrors the traversal order of timers_check(). */
485 void timers_dump(const struct timers *timers, FILE *fp)
487 unsigned int l, i, off;
488 unsigned long long base;
493 fprintf(fp, "Base: %llu\n", (unsigned long long)timers->base);
495 if (!timers->level[0])
498 fprintf(fp, "Level 0:\n");
500 /* First level is simple. */
501 off = timers->base % PER_LEVEL;
502 for (i = 0; i < PER_LEVEL; i++) {
503 const struct list_head *h;
/* NOTE(review): "%lu" is paired with timers->base + i, a uint64_t —
 * wrong specifier on LLP64/32-bit targets; should be "%llu" with a
 * cast to unsigned long long. Flagged only (doc-only change). */
505 fprintf(fp, " Bucket %llu (%lu):",
506 (i+off) % PER_LEVEL, timers->base + i);
507 h = &timers->level[0]->list[(i+off) % PER_LEVEL];
508 dump_bucket_stats(fp, h);
511 /* For other levels, "current" bucket has been emptied, and may contain
512 * entries for the current + level_size bucket. */
513 for (l = 1; l < ARRAY_SIZE(timers->level) && timers->level[l]; l++) {
514 uint64_t per_bucket = 1ULL << (TIMER_LEVEL_BITS * l);
516 off = ((timers->base >> (l*TIMER_LEVEL_BITS)) % PER_LEVEL);
517 /* We start at *next* bucket. */
518 base = (timers->base & ~(per_bucket - 1)) + per_bucket;
520 fprintf(fp, "Level %u:\n", l);
521 for (i = 1; i <= PER_LEVEL; i++) {
522 const struct list_head *h;
524 fprintf(fp, " Bucket %llu (%llu - %llu):",
526 base, base + per_bucket - 1);
528 h = &timers->level[l]->list[(i+off) % PER_LEVEL];
529 dump_bucket_stats(fp, h);
535 if (!list_empty(&timers->far)) {
536 fprintf(fp, "Far timers:");
537 dump_bucket_stats(fp, &timers->far);
/* Release all lazily-allocated levels via the timer_free hook.
 * Unallocated slots are NULL; the default hook (presumably free())
 * accepts NULL — custom hooks must too. Pending timers themselves are
 * not touched: they are owned by the caller. */
542 void timers_cleanup(struct timers *timers)
546 for (l = 0; l < ARRAY_SIZE(timers->level); l++)
547 timer_free(timers, timers->level[l]);