/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *      based on kernel/timer.c
 *
 *      Help, testing, suggestions, bugfixes, improvements were
 *      provided by:
 *
 *      George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>

#include <asm/uaccess.h>

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
static ktime_t ktime_get(void)
{
        struct timespec now;

        ktime_get_ts(&now);

        return timespec_to_ktime(now);
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
static ktime_t ktime_get_real(void)
{
        struct timespec now;

        getnstimeofday(&now);

        return timespec_to_ktime(now);
}

EXPORT_SYMBOL_GPL(ktime_get_real);

/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
static DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
        .clock_base =
        {
                {
                        .index = CLOCK_REALTIME,
                        .get_time = &ktime_get_real,
                        .resolution = KTIME_REALTIME_RES,
                },
                {
                        .index = CLOCK_MONOTONIC,
                        .get_time = &ktime_get,
                        .resolution = KTIME_MONOTONIC_RES,
                },
        }
};

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:         pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        struct timespec tomono;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);
                getnstimeofday(ts);
                tomono = wall_to_monotonic;

        } while (read_seqretry(&xtime_lock, seq));

        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
                                ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

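/*
 * Illustrative sketch (not part of the original file): how a caller can
 * combine ktime_get_ts() with the ktime_t helpers to obtain the monotonic
 * time as a scalar nanosecond value. The function name is hypothetical.
 */
static inline s64 example_monotonic_ns(void)
{
        struct timespec ts;

        /* normalized monotonic time, as computed above */
        ktime_get_ts(&ts);

        /* convert to the ktime_t union, then to plain nanoseconds */
        return ktime_to_ns(timespec_to_ktime(ts));
}
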
/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
        ktime_t xtim, tomono;
        struct timespec xts;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);
                getnstimeofday(&xts);
        } while (read_seqretry(&xtime_lock, seq));

        xtim = timespec_to_ktime(xts);
        tomono = timespec_to_ktime(wall_to_monotonic);
        base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
        base->clock_base[CLOCK_MONOTONIC].softirq_time =
                ktime_add(xtim, tomono);
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

#define set_curr_timer(b, t)            do { (b)->curr_timer = (t); } while (0)

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
                                                    unsigned long *flags)
{
        struct hrtimer_clock_base *base;

        for (;;) {
                base = timer->base;
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->cpu_base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU: */
                        spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
                }
                cpu_relax();
        }
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
        struct hrtimer_clock_base *new_base;
        struct hrtimer_cpu_base *new_cpu_base;

        new_cpu_base = &__get_cpu_var(hrtimer_bases);
        new_base = &new_cpu_base->clock_base[base->index];

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * so we keep it on the same CPU. No hassle vs. reprogramming
                 * the event source in the high resolution case. The softirq
                 * code will take care of this when the timer function has
                 * completed. There is no conflict as we hold the lock until
                 * the timer is enqueued.
                 */
                if (unlikely(base->cpu_base->curr_timer == timer))
                        return base;

                /* See the comment in lock_timer_base() */
                timer->base = NULL;
                spin_unlock(&base->cpu_base->lock);
                spin_lock(&new_base->cpu_base->lock);
                timer->base = new_base;
        }
        return new_base;
}

#else /* CONFIG_SMP */

#define set_curr_timer(b, t)            do { } while (0)

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
        struct hrtimer_clock_base *base = timer->base;

        spin_lock_irqsave(&base->cpu_base->lock, *flags);

        return base;
}

#define switch_hrtimer_base(t, b)       (b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:         addend
 * @nsec:       the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
        ktime_t tmp;

        if (likely(nsec < NSEC_PER_SEC)) {
                tmp.tv64 = nsec;
        } else {
                unsigned long rem = do_div(nsec, NSEC_PER_SEC);

                tmp = ktime_set((long)nsec, rem);
        }

        return ktime_add(kt, tmp);
}
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
static unsigned long ktime_divns(const ktime_t kt, s64 div)
{
        u64 dclc;
        int sft = 0;

        dclc = ktime_to_ns(kt);
        /* Make sure the divisor is less than 2^32: */
        while (div >> 32) {
                sft++;
                div >>= 1;
        }
        /* shift the dividend by the same amount, so the quotient is
         * preserved up to rounding, then use the 64/32 bit do_div() */
        dclc >>= sft;
        do_div(dclc, (unsigned long) div);

        return (unsigned long) dclc;
}
#else /* BITS_PER_LONG < 64 */
# define ktime_divns(kt, div)           (unsigned long)((kt).tv64 / (div))
#endif /* BITS_PER_LONG >= 64 */

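/*
 * Illustrative sketch (not part of the original file): exercising both
 * paths of the out-of-line ktime_add_ns() above. Offsets below
 * NSEC_PER_SEC take the fast path; larger offsets go through do_div()
 * to be split into a (seconds, nanoseconds) pair. The function name is
 * hypothetical.
 */
static inline ktime_t example_add_offsets(ktime_t now)
{
        /* fast path: 100us < NSEC_PER_SEC */
        ktime_t t = ktime_add_ns(now, 100 * NSEC_PER_USEC);

        /* slow path: 2.5s is split into (2s, 500000000ns);
         * compute the offset in 64 bit to avoid long overflow */
        return ktime_add_ns(t, (u64)NSEC_PER_SEC * 5 / 2);
}
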
/*
 * Timekeeping resumed notification
 */
void hrtimer_notify_resume(void)
{
        clock_was_set();
}

/*
 * Counterpart to lock_timer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
        spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:      hrtimer to forward
 * @now:        forward past this time
 * @interval:   the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
unsigned long
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
        unsigned long orun = 1;
        ktime_t delta;

        delta = ktime_sub(now, timer->expires);

        if (delta.tv64 < 0)
                return 0;

        if (interval.tv64 < timer->base->resolution.tv64)
                interval.tv64 = timer->base->resolution.tv64;

        if (unlikely(delta.tv64 >= interval.tv64)) {
                s64 incr = ktime_to_ns(interval);

                orun = ktime_divns(delta, incr);
                timer->expires = ktime_add_ns(timer->expires, incr * orun);
                if (timer->expires.tv64 > now.tv64)
                        return orun;
                /*
                 * This (and the ktime_add() below) is the
                 * correction for exact:
                 */
                orun++;
        }
        timer->expires = ktime_add(timer->expires, interval);

        return orun;
}

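/*
 * Illustrative sketch (not part of the original file): a periodic timer
 * callback built on hrtimer_forward(). The handler pushes its own expiry
 * past "now" in whole intervals (skipped periods are reported as
 * overruns) and asks to be re-enqueued. All example_* names are
 * hypothetical.
 */
static ktime_t example_period;

static enum hrtimer_restart example_periodic_cb(struct hrtimer *timer)
{
        /* move the expiry into the future, interval by interval */
        hrtimer_forward(timer, timer->base->get_time(), example_period);

        /* a non-NORESTART return value re-enqueues the timer, see
         * run_hrtimer_queue() below */
        return HRTIMER_RESTART;
}
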
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
static void enqueue_hrtimer(struct hrtimer *timer,
                            struct hrtimer_clock_base *base)
{
        struct rb_node **link = &base->active.rb_node;
        struct rb_node *parent = NULL;
        struct hrtimer *entry;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct hrtimer, node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same expiry time stay together.
                 */
                if (timer->expires.tv64 < entry->expires.tv64)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        /*
         * Insert the timer to the rbtree and check whether it
         * replaces the first pending timer
         */
        rb_link_node(&timer->node, parent, link);
        rb_insert_color(&timer->node, &base->active);

        if (!base->first || timer->expires.tv64 <
            rb_entry(base->first, struct hrtimer, node)->expires.tv64)
                base->first = &timer->node;
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 */
static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base)
{
        /*
         * Remove the timer from the rbtree and replace the
         * first entry pointer if necessary.
         */
        if (base->first == &timer->node)
                base->first = rb_next(&timer->node);
        rb_erase(&timer->node, &base->active);
        /* mark the timer inactive: the node's parent points to itself */
        rb_set_parent(&timer->node, &timer->node);
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
        if (hrtimer_active(timer)) {
                __remove_hrtimer(timer, base);
                return 1;
        }
        return 0;
}

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:      the timer to be added
 * @tim:        expiry time
 * @mode:       expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
        struct hrtimer_clock_base *base, *new_base;
        unsigned long flags;
        int ret;

        base = lock_hrtimer_base(timer, &flags);

        /* Remove an active timer from the queue: */
        ret = remove_hrtimer(timer, base);

        /* Switch the timer base, if necessary: */
        new_base = switch_hrtimer_base(timer, base);

        if (mode == HRTIMER_MODE_REL) {
                tim = ktime_add(tim, new_base->get_time());
                /*
                 * CONFIG_TIME_LOW_RES is a temporary way for architectures
                 * to signal that they simply return xtime in
                 * do_gettimeoffset(). In this case we want to round up by
                 * resolution when starting a relative timer, to avoid short
                 * timeouts. This will go away with the GTOD framework.
                 */
#ifdef CONFIG_TIME_LOW_RES
                tim = ktime_add(tim, base->resolution);
#endif
        }
        timer->expires = tim;

        enqueue_hrtimer(timer, new_base);

        unlock_hrtimer_base(timer, &flags);

        return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start);

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:      hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
        struct hrtimer_clock_base *base;
        unsigned long flags;
        int ret = -1;

        base = lock_hrtimer_base(timer, &flags);

        if (base->cpu_base->curr_timer != timer)
                ret = remove_hrtimer(timer, base);

        unlock_hrtimer_base(timer, &flags);

        return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:      the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
        for (;;) {
                int ret = hrtimer_try_to_cancel(timer);

                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:      the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
        struct hrtimer_clock_base *base;
        unsigned long flags;
        ktime_t rem;

        base = lock_hrtimer_base(timer, &flags);
        rem = ktime_sub(timer->expires, base->get_time());
        unlock_hrtimer_base(timer, &flags);

        return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

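/*
 * Illustrative sketch (not part of the original file): the remaining
 * time is a ktime_t and goes negative once the timer has expired.
 * The function name is hypothetical.
 */
static inline int example_timer_is_late(const struct hrtimer *timer)
{
        return ktime_to_ns(hrtimer_get_remaining(timer)) < 0;
}
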
#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        struct hrtimer_clock_base *base = cpu_base->clock_base;
        ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
        unsigned long flags;
        int i;

        spin_lock_irqsave(&cpu_base->lock, flags);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
                struct hrtimer *timer;

                if (!base->first)
                        continue;

                timer = rb_entry(base->first, struct hrtimer, node);
                delta.tv64 = timer->expires.tv64;
                delta = ktime_sub(delta, base->get_time());
                if (delta.tv64 < mindelta.tv64)
                        mindelta.tv64 = delta.tv64;
        }

        spin_unlock_irqrestore(&cpu_base->lock, flags);

        if (mindelta.tv64 < 0)
                mindelta.tv64 = 0;

        return mindelta;
}
#endif

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:      the timer to be initialized
 * @clock_id:   the clock to be used
 * @mode:       timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
                  enum hrtimer_mode mode)
{
        struct hrtimer_cpu_base *cpu_base;

        memset(timer, 0, sizeof(struct hrtimer));

        cpu_base = &__raw_get_cpu_var(hrtimer_bases);

        if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
                clock_id = CLOCK_MONOTONIC;

        timer->base = &cpu_base->clock_base[clock_id];
        /* mark the timer inactive: the node's parent points to itself */
        rb_set_parent(&timer->node, &timer->node);
}
EXPORT_SYMBOL_GPL(hrtimer_init);

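/*
 * Illustrative sketch (not part of the original file): arming a one-shot
 * timer on the monotonic clock and tearing it down again. All example_*
 * names are hypothetical.
 */
static struct hrtimer example_timer;

static enum hrtimer_restart example_oneshot_cb(struct hrtimer *timer)
{
        /* one-shot: do not re-enqueue */
        return HRTIMER_NORESTART;
}

static void example_arm(void)
{
        hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        example_timer.function = example_oneshot_cb;
        /* fire 10ms from now; hrtimer_start() converts the relative
         * expiry to absolute time via new_base->get_time() */
        hrtimer_start(&example_timer, ktime_set(0, 10000000),
                      HRTIMER_MODE_REL);
}

static void example_disarm(void)
{
        /* spins until a concurrently running callback has finished */
        hrtimer_cancel(&example_timer);
}
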
/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:          pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
        struct hrtimer_cpu_base *cpu_base;

        cpu_base = &__raw_get_cpu_var(hrtimer_bases);
        *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

        return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

/*
 * Expire the per base hrtimer-queue:
 */
static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
                                     int index)
{
        struct rb_node *node;
        struct hrtimer_clock_base *base = &cpu_base->clock_base[index];

        if (!base->first)
                return;

        if (base->get_softirq_time)
                base->softirq_time = base->get_softirq_time();

        spin_lock_irq(&cpu_base->lock);

        while ((node = base->first)) {
                struct hrtimer *timer;
                enum hrtimer_restart (*fn)(struct hrtimer *);
                int restart;

                timer = rb_entry(node, struct hrtimer, node);
                if (base->softirq_time.tv64 <= timer->expires.tv64)
                        break;

                fn = timer->function;
                set_curr_timer(cpu_base, timer);
                __remove_hrtimer(timer, base);
                spin_unlock_irq(&cpu_base->lock);

                /* the callback runs without the base lock held */
                restart = fn(timer);

                spin_lock_irq(&cpu_base->lock);

                if (restart != HRTIMER_NORESTART) {
                        BUG_ON(hrtimer_active(timer));
                        enqueue_hrtimer(timer, base);
                }
        }
        set_curr_timer(cpu_base, NULL);
        spin_unlock_irq(&cpu_base->lock);
}

/*
 * Called from timer softirq every jiffy, expire hrtimers:
 */
void hrtimer_run_queues(void)
{
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        int i;

        hrtimer_get_softirq_time(cpu_base);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
                run_hrtimer_queue(cpu_base, i);
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
        struct hrtimer_sleeper *t =
                container_of(timer, struct hrtimer_sleeper, timer);
        struct task_struct *task = t->task;

        t->task = NULL;
        if (task)
                wake_up_process(task);

        return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
        sl->timer.function = hrtimer_wakeup;
        sl->task = task;
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
        hrtimer_init_sleeper(t, current);

        do {
                set_current_state(TASK_INTERRUPTIBLE);
                hrtimer_start(&t->timer, t->timer.expires, mode);

                schedule();

                hrtimer_cancel(&t->timer);
                mode = HRTIMER_MODE_ABS;

        } while (t->task && !signal_pending(current));

        return t->task == NULL;
}

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
        struct hrtimer_sleeper t;
        struct timespec __user *rmtp;
        struct timespec tu;
        ktime_t time;

        restart->fn = do_no_restart_syscall;

        hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS);
        t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2;

        if (do_nanosleep(&t, HRTIMER_MODE_ABS))
                return 0;

        rmtp = (struct timespec __user *) restart->arg1;
        if (rmtp) {
                time = ktime_sub(t.timer.expires, t.timer.base->get_time());
                if (time.tv64 <= 0)
                        return 0;
                tu = ktime_to_timespec(time);
                if (copy_to_user(rmtp, &tu, sizeof(tu)))
                        return -EFAULT;
        }

        restart->fn = hrtimer_nanosleep_restart;

        /* The other values in restart are already filled in */
        return -ERESTART_RESTARTBLOCK;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
                       const enum hrtimer_mode mode, const clockid_t clockid)
{
        struct restart_block *restart;
        struct hrtimer_sleeper t;
        struct timespec tu;
        ktime_t rem;

        hrtimer_init(&t.timer, clockid, mode);
        t.timer.expires = timespec_to_ktime(*rqtp);
        if (do_nanosleep(&t, mode))
                return 0;

        /* Absolute timers do not update the rmtp value and restart: */
        if (mode == HRTIMER_MODE_ABS)
                return -ERESTARTNOHAND;

        if (rmtp) {
                rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
                if (rem.tv64 <= 0)
                        return 0;
                tu = ktime_to_timespec(rem);
                if (copy_to_user(rmtp, &tu, sizeof(tu)))
                        return -EFAULT;
        }

        restart = &current_thread_info()->restart_block;
        restart->fn = hrtimer_nanosleep_restart;
        restart->arg0 = (unsigned long) t.timer.base->index;
        restart->arg1 = (unsigned long) rmtp;
        restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF;
        restart->arg3 = t.timer.expires.tv64 >> 32;

        return -ERESTART_RESTARTBLOCK;
}

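/*
 * Illustrative sketch (not part of the original file): the 64-bit expiry
 * is carried across the restart block as two 32-bit halves (arg2/arg3
 * above); splitting and reassembling are exact inverses. The function
 * name is hypothetical.
 */
static inline u64 example_restart_roundtrip(u64 expires)
{
        unsigned long lo = expires & 0xFFFFFFFF;        /* -> restart->arg2 */
        unsigned long hi = expires >> 32;               /* -> restart->arg3 */

        return ((u64)hi << 32) | (u64)lo;               /* == expires */
}
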
asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
        struct timespec tu;

        if (copy_from_user(&tu, rqtp, sizeof(tu)))
                return -EFAULT;

        if (!timespec_valid(&tu))
                return -EINVAL;

        return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void __devinit init_hrtimers_cpu(int cpu)
{
        struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
        int i;

        spin_lock_init(&cpu_base->lock);
        lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
                cpu_base->clock_base[i].cpu_base = cpu_base;
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
                                 struct hrtimer_clock_base *new_base)
{
        struct hrtimer *timer;
        struct rb_node *node;

        while ((node = rb_first(&old_base->active))) {
                timer = rb_entry(node, struct hrtimer, node);
                __remove_hrtimer(timer, old_base);
                timer->base = new_base;
                enqueue_hrtimer(timer, new_base);
        }
}

static void migrate_hrtimers(int cpu)
{
        struct hrtimer_cpu_base *old_base, *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = &per_cpu(hrtimer_bases, cpu);
        new_base = &get_cpu_var(hrtimer_bases);

        local_irq_disable();

        spin_lock(&new_base->lock);
        spin_lock(&old_base->lock);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                BUG_ON(old_base->curr_timer);

                migrate_hrtimer_list(&old_base->clock_base[i],
                                     &new_base->clock_base[i]);
        }
        spin_unlock(&old_base->lock);
        spin_unlock(&new_base->lock);

        local_irq_enable();
        put_cpu_var(hrtimer_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
                                        unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {

        case CPU_UP_PREPARE:
                init_hrtimers_cpu(cpu);
                break;

#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
                migrate_hrtimers(cpu);
                break;
#endif

        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
        .notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
        hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                           (void *)(long)smp_processor_id());
        register_cpu_notifier(&hrtimers_nb);
}