/*
 * linux/arch/x86-64/kernel/time.c
 *
 * "High Precision Event Timer" based timekeeping.
 *
 * Copyright (c) 1991,1992,1995 Linus Torvalds
 * Copyright (c) 1994 Alan Modra
 * Copyright (c) 1995 Markus Kuhn
 * Copyright (c) 1996 Ingo Molnar
 * Copyright (c) 1998 Andrea Arcangeli
 * Copyright (c) 2002 Vojtech Pavlik
 * Copyright (c) 2003 Andi Kleen
 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/bcd.h>
#include <linux/kallsyms.h>
#include <linux/acpi.h>
#include <acpi/achware.h>	/* for PM timer frequency */
#include <asm/8253pit.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <linux/cpufreq.h>
#include <linux/hpet.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/apic.h>
#endif
#ifdef CONFIG_CPU_FREQ
static void cpufreq_delayed_get(void);
#endif

extern void i8254_timer_resume(void);
extern int using_apic_timer;

static char *time_init_gtod(void);
DEFINE_SPINLOCK(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);

int nohpet __initdata = 0;
static int notsc __initdata = 0;

#undef HPET_HACK_ENABLE_DANGEROUS

unsigned int cpu_khz;			/* TSC clocks / usec, not used here */
static unsigned long hpet_period;	/* fsecs / HPET clock */
unsigned long hpet_tick;		/* HPET clocks / interrupt */
int hpet_use_timer;			/* Use counter of hpet for time keeping, otherwise PIT */
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks;			/* command line option */
unsigned long long monotonic_base;

struct vxtime_data __vxtime __section_vxtime;	/* for vsyscalls */

volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;
/*
 * do_gettimeoffset() returns microseconds since last timer interrupt was
 * triggered by hardware. A memory read of HPET is slower than a register read
 * of TSC, but much more reliable. It's also synchronized to the timer
 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
 * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
 * This is not a problem, because jiffies hasn't updated either. They are bound
 * together by xtime_lock.
 */
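/*
 * Illustration (not from the original source): the TSC path below is plain
 * 32.32 fixed-point arithmetic. Assuming a hypothetical 2 GHz CPU, cpu_khz is
 * 2000000 and vxtime.tsc_quot = (1000 << 32) / cpu_khz ~= 2147483, i.e. about
 * 0.0005 microseconds per TSC cycle. A delta of 2000000 cycles (1 ms) then
 * yields (2000000 * 2147483) >> 32 ~= 999 microseconds; the small error comes
 * from the truncating integer division.
 */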
static inline unsigned int do_gettimeoffset_tsc(void)
{
	unsigned long t, x;

	t = get_cycles_sync();
	if (t < vxtime.last_tsc) t = vxtime.last_tsc; /* hack */
	x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
	return x;
}

static inline unsigned int do_gettimeoffset_hpet(void)
{
	/* cap counter read to one tick to avoid inconsistencies */
	unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
	return (min(counter, hpet_tick) * vxtime.quot) >> 32;
}
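/*
 * Illustration (not from the original source): vxtime.quot is microseconds
 * per HPET clock in 32.32 fixed point, (1000000 << 32) / vxtime_hz. With the
 * common 14.31818 MHz HPET, a delta of 14318 HPET clocks converts to roughly
 * 1000 microseconds; capping the delta at hpet_tick keeps the result within
 * one timer interval.
 */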
unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;

/*
 * This version of gettimeofday() has microsecond resolution and better than
 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818
 * MHz) HPET timer.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long seq, t;
	unsigned int sec, usec;

	do {
		seq = read_seqbegin(&xtime_lock);

		sec = xtime.tv_sec;
		usec = xtime.tv_nsec / 1000;

		/* i386 does some correction here to keep the clock
		   monotonic even when ntpd is fixing drift.
		   But that didn't work for me; there is a non-monotonic
		   clock anyway with ntp.
		   I dropped all corrections now until a real solution can
		   be found. Note when you fix it here you need to do the same
		   in arch/x86_64/kernel/vsyscall.c and export all needed
		   variables in vmlinux.lds. -AK */

		t = (jiffies - wall_jiffies) * (1000000L / HZ) +
			do_gettimeoffset();
		usec += t;

	} while (read_seqretry(&xtime_lock, seq));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}

EXPORT_SYMBOL(do_gettimeofday);
/*
 * settimeofday() first undoes the correction that gettimeofday would do
 * on the time, and then saves it. This is ugly, but has been like this for
 * ages already.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	nsec -= do_gettimeoffset() * 1000 +
		(jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);

	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	write_sequnlock_irq(&xtime_lock);
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or only a single word.
	   This checks if the address on the stack looks like a kernel text address.
	   There is a small window for false hits, but in that case the tick
	   is just accounted to the spinlock function.
	   Better would be to write these functions in assembler again
	   and check exactly. */
	if (in_lock_functions(pc)) {
		char *v = *(char **)regs->rsp;
		if ((v >= _stext && v <= _etext) ||
		    (v >= _sinittext && v <= _einittext) ||
		    (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
			return (unsigned long)v;
		return ((unsigned long *)regs->rsp)[1];
	}
	return pc;
}

EXPORT_SYMBOL(profile_pc);
/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
 * ms after the second nowtime has started, because when nowtime is written
 * into the registers of the CMOS clock, it will jump to the next second
 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
 * sheet for details.
 */
static void set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char control, freq_select;

/*
 * IRQs are disabled when we're called from the timer interrupt,
 * no need for spin_lock_irqsave()
 */

	spin_lock(&rtc_lock);

/*
 * Tell the clock it's being set and stop it.
 */

	control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

	freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	BCD_TO_BIN(cmos_minutes);

/*
 * Since we're only adjusting minutes and seconds, don't interfere with hour
 * overflow. This avoids messing with unknown time zones but requires your RTC
 * not to be off by more than 15 minutes. Since we're calling it only when
 * our clock is externally synchronized using NTP, this shouldn't be a problem.
 */

	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
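	/*
	 * Illustration (not from the original source): if the RTC runs in a
	 * time zone offset by half an hour, e.g. cmos_minutes = 50 while
	 * real_minutes = 20, then abs(20 - 50) = 30, (30 + 15) / 30 = 1 is
	 * odd, and adding 30 brings real_minutes back in line with the RTC,
	 * so only the minutes/seconds within the local hour are rewritten.
	 */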
	/* AMD 8111 is a really bad time keeper and hits this regularly.
	   It probably was an attempt to avoid screwing up DST, but ignore
	   that for now. */
	if (abs(real_minutes - cmos_minutes) >= 30) {
		printk(KERN_WARNING "time.c: can't update CMOS clock "
		       "from %d to %d\n", cmos_minutes, real_minutes);
	}

	BIN_TO_BCD(real_seconds);
	BIN_TO_BCD(real_minutes);
	CMOS_WRITE(real_seconds, RTC_SECONDS);
	CMOS_WRITE(real_minutes, RTC_MINUTES);

/*
 * The following flags have to be released exactly in this order, otherwise the
 * DS12887 (popular MC146818A clone with integrated battery and quartz) will
 * not reset the oscillator and will not update precisely 500 ms later. You
 * won't find this mentioned in the Dallas Semiconductor data sheets, but who
 * believes data sheets anyway ...	-- Markus Kuhn
 */

	CMOS_WRITE(control, RTC_CONTROL);
	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);

	spin_unlock(&rtc_lock);
}
/* monotonic_clock(): returns # of nanoseconds passed since time_init()
 * Note: This function is required to return accurate
 * time even in the absence of multiple timer ticks.
 */
unsigned long long monotonic_clock(void)
{
	unsigned long seq;
	u32 last_offset, this_offset, offset;
	unsigned long long base;

	if (vxtime.mode == VXTIME_HPET) {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last;
			base = monotonic_base;
			this_offset = hpet_readl(HPET_COUNTER);
		} while (read_seqretry(&xtime_lock, seq));
		offset = (this_offset - last_offset);
		offset *= (NSEC_PER_SEC/HZ) / hpet_tick;
		return base + offset;
	} else {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last_tsc;
			base = monotonic_base;
		} while (read_seqretry(&xtime_lock, seq));
		this_offset = get_cycles_sync();
		offset = (this_offset - last_offset) * 1000 / cpu_khz;
		return base + offset;
	}
}

EXPORT_SYMBOL(monotonic_clock);
static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
{
	static long lost_count;
	static int warned;

	if (report_lost_ticks) {
		printk(KERN_WARNING "time.c: Lost %d timer tick(s)! ", lost);
		print_symbol("rip %s)\n", regs->rip);
	}

	if (lost_count == 1000 && !warned) {
		printk(KERN_WARNING
		       "warning: many lost ticks.\n"
		       KERN_WARNING "Your time source seems to be unstable or "
		       "some driver is hogging interrupts\n");
		print_symbol("rip %s\n", regs->rip);
		if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
			printk(KERN_WARNING "Falling back to HPET\n");
			if (hpet_use_timer)
				vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
			else
				vxtime.last = hpet_readl(HPET_COUNTER);
			vxtime.mode = VXTIME_HPET;
			do_gettimeoffset = do_gettimeoffset_hpet;
		}
		/* else should fall back to PIT, but code missing. */
		warned = 1;
	} else
		lost_count++;

#ifdef CONFIG_CPU_FREQ
	/* In some cases the CPU can change frequency without us noticing
	   (like going into thermal throttle).
	   Give cpufreq a chance to catch up. */
	if ((lost_count + 1) % 25 == 0) {
		cpufreq_delayed_get();
	}
#endif
}
void main_timer_handler(struct pt_regs *regs)
{
	static unsigned long rtc_update = 0;
	unsigned long tsc;
	int delay = 0, offset = 0, lost = 0;

/*
 * Here we are in the timer irq handler. We have irqs locally disabled (so we
 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
 * on the other CPU, so we need a lock. We also need to lock the vsyscall
 * variables, because both do_timer() and us change them -arca+vojtech
 */

	write_seqlock(&xtime_lock);

	if (vxtime.hpet_address)
		offset = hpet_readl(HPET_COUNTER);

	if (hpet_use_timer) {
		/* if we're using the hpet timer functionality,
		 * we can more accurately know the counter value
		 * when the timer interrupt occurred.
		 */
		offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
		delay = hpet_readl(HPET_COUNTER) - offset;
	} else if (!pmtmr_ioport) {
		spin_lock(&i8253_lock);
		outb_p(0x00, 0x43);
		delay = inb_p(0x40);
		delay |= inb(0x40) << 8;
		spin_unlock(&i8253_lock);
		delay = LATCH - 1 - delay;
	}

	tsc = get_cycles_sync();

	if (vxtime.mode == VXTIME_HPET) {
		if (offset - vxtime.last > hpet_tick) {
			lost = (offset - vxtime.last) / hpet_tick - 1;
		}

		monotonic_base +=
			(offset - vxtime.last) * (NSEC_PER_SEC/HZ) / hpet_tick;

		vxtime.last = offset;
#ifdef CONFIG_X86_PM_TIMER
	} else if (vxtime.mode == VXTIME_PMTMR) {
		lost = pmtimer_mark_offset();
#endif
	} else {
		offset = (((tsc - vxtime.last_tsc) *
			   vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);

		if (offset > (USEC_PER_SEC / HZ)) {
			lost = offset / (USEC_PER_SEC / HZ);
			offset %= (USEC_PER_SEC / HZ);
		}
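		/*
		 * Illustration (not from the original source): with HZ = 1000
		 * the tick period is USEC_PER_SEC / HZ = 1000 us. If 3500 us
		 * of TSC time elapsed since the last tick, offset is
		 * 3500 - 1000 = 2500 here, so lost = 2 whole extra ticks and
		 * the remaining 500 us are kept as the new offset.
		 */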
		monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;

		vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;

		if ((((tsc - vxtime.last_tsc) *
		      vxtime.tsc_quot) >> 32) < offset)
			vxtime.last_tsc = tsc -
				(((long) offset << 32) / vxtime.tsc_quot) - 1;
	}

	if (lost > 0)
		handle_lost_ticks(lost, regs);
/*
 * Do the timer stuff.
 */

	do_timer(regs);
#ifndef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif

/*
 * In the SMP case we use the local APIC timer interrupt to do the profiling,
 * except when we simulate SMP mode on a uniprocessor system, in that case we
 * have to call the local interrupt handler.
 */

#ifndef CONFIG_X86_LOCAL_APIC
	profile_tick(CPU_PROFILING, regs);
#else
	if (!using_apic_timer)
		smp_local_timer_interrupt(regs);
#endif

/*
 * If we have an externally synchronized Linux clock, then update CMOS clock
 * accordingly every ~11 minutes. set_rtc_mmss() will be called in the jiffy
 * closest to exactly 500 ms before the next second. If the update fails, we
 * don't care, as it'll be updated on the next turn, and the problem (time way
 * off) isn't likely to go away much sooner anyway.
 */

	if (ntp_synced() && xtime.tv_sec > rtc_update &&
	    abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
		set_rtc_mmss(xtime.tv_sec);
		rtc_update = xtime.tv_sec + 660;
	}

	write_sequnlock(&xtime_lock);
}
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	if (apic_runs_main_timer > 1)
		return IRQ_HANDLED;
	main_timer_handler(regs);
#ifdef CONFIG_X86_LOCAL_APIC
	if (using_apic_timer)
		smp_send_timer_broadcast_ipi();
#endif
	return IRQ_HANDLED;
}
static unsigned int cyc2ns_scale __read_mostly;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
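/*
 * Illustration (not from the original source): for a hypothetical 2 GHz CPU,
 * cpu_khz = 2000000, so cyc2ns_scale = (1000000 << 10) / 2000000 = 512 and
 * cycles_2_ns(cyc) = (cyc * 512) >> 10 = cyc / 2, i.e. 0.5 ns per cycle as
 * expected. The 2^10 scale keeps roughly three decimal digits of precision in
 * the khz-to-ns ratio while staying well clear of 64-bit overflow.
 */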
unsigned long long sched_clock(void)
{
	unsigned long a = 0;

#if 0
	/* Don't do a HPET read here. Using TSC always is much faster
	   and HPET may not be mapped yet when the scheduler first runs.
	   Disadvantage is a small drift between CPUs in some configurations,
	   but that should be tolerable. */
	if (__vxtime.mode == VXTIME_HPET)
		return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
#endif

	/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
	   which means it is not completely exact and may not be monotonic between
	   CPUs. But the errors should be too small to matter for scheduling
	   purposes. */

	rdtscll(a);
	return cycles_2_ns(a);
}
static unsigned long get_cmos_time(void)
{
	unsigned int timeout = 1000000, year, mon, day, hour, min, sec;
	unsigned char uip = 0, this = 0;
	unsigned long flags;
	unsigned extyear = 0;

/*
 * The Linux interpretation of the CMOS clock register contents: When the
 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
 * second which has precisely just started. Waiting for this can take up to 1
 * second, we timeout approximately after 2.4 seconds on a machine with
 * standard 8.3 MHz ISA bus.
 */
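/*
 * Illustration (not from the original source): the 2.4 s figure is roughly
 * the 1,000,000 loop iterations below times the cost of one CMOS_READ, which
 * is a couple of slow ISA I/O cycles, i.e. a few microseconds per iteration
 * on the 8.3 MHz bus mentioned above.
 */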
	spin_lock_irqsave(&rtc_lock, flags);

	while (timeout && (!uip || this)) {
		uip |= this;
		this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
		timeout--;
	}

	/*
	 * Here we are safe to assume the registers won't change for a whole
	 * second, so we just go ahead and read them.
	 */
	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hour = CMOS_READ(RTC_HOURS);
	day = CMOS_READ(RTC_DAY_OF_MONTH);
	mon = CMOS_READ(RTC_MONTH);
	year = CMOS_READ(RTC_YEAR);

	if (acpi_fadt.revision >= FADT2_REVISION_ID && acpi_fadt.century)
		extyear = CMOS_READ(acpi_fadt.century);

	spin_unlock_irqrestore(&rtc_lock, flags);

	/*
	 * We know that x86-64 always uses BCD format, no need to check the
	 * config register.
	 */

	if (extyear)
		printk(KERN_INFO "Extended CMOS year: %d\n", extyear);

	/*
	 * x86-64 systems have only existed since 2002.
	 * This will work up to Dec 31, 2100.
	 */

	return mktime(year, mon, day, hour, min, sec);
}
#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
   changes.

   RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
   not that important because current Opteron setups do not support
   scaling on SMP anyway.

   Should fix up last_tsc too. Currently gettimeofday in the
   first tick after the change will be slightly wrong. */

#include <linux/workqueue.h>

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

static void handle_cpufreq_delayed_get(void *v)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		cpufreq_get(cpu);
	}
	cpufreq_delayed_issched = 0;
}

/* If we notice lost ticks, schedule a call to cpufreq_get(), which tries
 * to verify that the CPU frequency the timing core thinks the CPU is
 * running at is still correct.
 */
static void cpufreq_delayed_get(void)
{
	if (cpufreq_init && !cpufreq_delayed_issched) {
		cpufreq_delayed_issched = 1;
		printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
		schedule_work(&cpufreq_delayed_get_work);
	}
}
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
static unsigned long cpu_khz_ref = 0;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		cpu_khz_ref = cpu_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	}

	set_cyc2ns_scale(cpu_khz_ref);

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
				       CPUFREQ_TRANSITION_NOTIFIER))
		cpufreq_init = 1;
	return 0;
}

core_initcall(cpufreq_tsc);

#endif
/*
 * calibrate_tsc() calibrates the processor TSC in a very simple way, comparing
 * it to the HPET timer of known frequency.
 */

#define TICK_COUNT 100000000
static unsigned int __init hpet_calibrate_tsc(void)
{
	int tsc_start, hpet_start;
	int tsc_now, hpet_now;
	unsigned long flags;

	local_irq_save(flags);
	local_irq_disable();

	hpet_start = hpet_readl(HPET_COUNTER);
	tsc_start = get_cycles_sync();

	do {
		local_irq_disable();
		hpet_now = hpet_readl(HPET_COUNTER);
		tsc_now = get_cycles_sync();
		local_irq_restore(flags);
	} while ((tsc_now - tsc_start) < TICK_COUNT &&
		 (hpet_now - hpet_start) < TICK_COUNT);

	return (tsc_now - tsc_start) * 1000000000L
		/ ((hpet_now - hpet_start) * hpet_period / 1000);
}
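/*
 * Illustration (not from the original source): hpet_period is femtoseconds
 * per HPET clock, so (hpet_now - hpet_start) * hpet_period / 1000 is the
 * elapsed time in picoseconds, and TSC cycles * 1e9 / picoseconds is cycles
 * per millisecond, i.e. cpu_khz. For the common 14.31818 MHz HPET,
 * hpet_period is about 69841279 fs.
 */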
/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,
 * because we can read the value of the speaker with just one inb(),
 * where we need three i/o operations for the interrupt channel.
 * We count how many ticks the TSC does in 50 ms.
 */
static unsigned int __init pit_calibrate_tsc(void)
{
	unsigned long start, end;
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);

	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	outb(0xb0, 0x43);
	outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
	outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
	start = get_cycles_sync();
	while ((inb(0x61) & 0x20) == 0);
	end = get_cycles_sync();

	spin_unlock_irqrestore(&i8253_lock, flags);

	return (end - start) / 50;
}
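/*
 * Illustration (not from the original source): the PIT is programmed with
 * PIT_TICK_RATE / (1000 / 50) counts, i.e. the number of 1.193182 MHz PIT
 * ticks in 50 ms. On a hypothetical 2 GHz CPU about 100,000,000 TSC cycles
 * elapse while the counter runs down, and 100000000 / 50 = 2000000 kHz is
 * exactly cpu_khz.
 */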
static __init int late_hpet_init(void)
{
	struct hpet_data hd;
	unsigned int ntimer;

	if (!vxtime.hpet_address)
		return 0;

	memset(&hd, 0, sizeof(hd));

	ntimer = hpet_readl(HPET_ID);
	ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;

	/*
	 * Register with driver.
	 * Timer0 and Timer1 are used by the platform.
	 */
	hd.hd_phys_address = vxtime.hpet_address;
	hd.hd_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE);
	hd.hd_nirqs = ntimer;
	hd.hd_flags = HPET_DATA_PLATFORM;
	hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

	if (ntimer > 2) {
		struct hpet *hpet;
		struct hpet_timer *timer;
		int i;

		hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);

		for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
		     timer++, i++)
			hd.hd_irq[i] = (timer->hpet_config &
					Tn_INT_ROUTE_CNF_MASK) >>
				Tn_INT_ROUTE_CNF_SHIFT;
	}

	return 0;
}
fs_initcall(late_hpet_init);
static int hpet_timer_stop_set_go(unsigned long tick)
{
	unsigned int cfg;

/*
 * Stop the timers and reset the main counter.
 */

	cfg = hpet_readl(HPET_CFG);
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);

/*
 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
 * and period also hpet_tick.
 */
	if (hpet_use_timer) {
		hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
			    HPET_TN_32BIT, HPET_T0_CFG);
		hpet_writel(hpet_tick, HPET_T0_CMP);
		hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
		cfg |= HPET_CFG_LEGACY;
	}

	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);

	return 0;
}
static int hpet_init(void)
{
	unsigned int id;

	if (!vxtime.hpet_address)
		return -1;
	set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
	__set_fixmap(VSYSCALL_HPET, vxtime.hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);

/*
 * Read the period, compute tick and quotient.
 */
	id = hpet_readl(HPET_ID);
	if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
		return -1;

	hpet_period = hpet_readl(HPET_PERIOD);
	if (hpet_period < 100000 || hpet_period > 100000000)
		return -1;

	hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
		hpet_period;
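	/*
	 * Illustration (not from the original source): hpet_period is
	 * femtoseconds per HPET clock and USEC_PER_SEC / HZ is microseconds
	 * per timer tick, so assuming HZ = 250 the numerator is
	 * 4000 us * 1e9 fs/us = 4e12 fs per tick; for a 14.31818 MHz HPET
	 * (hpet_period ~= 69841279 fs) this rounds to hpet_tick ~= 57273
	 * HPET clocks per timer interrupt.
	 */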
	hpet_use_timer = (id & HPET_ID_LEGSUP);

	return hpet_timer_stop_set_go(hpet_tick);
}

static int hpet_reenable(void)
{
	return hpet_timer_stop_set_go(hpet_tick);
}
#define PIT_MODE 0x43
#define PIT_CH0  0x40

static void __init __pit_init(int val, u8 mode)
{
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);
	outb_p(mode, PIT_MODE);
	outb_p(val & 0xff, PIT_CH0);	/* LSB */
	outb_p(val >> 8, PIT_CH0);	/* MSB */
	spin_unlock_irqrestore(&i8253_lock, flags);
}

void __init pit_init(void)
{
	__pit_init(LATCH, 0x34);	/* binary, mode 2, LSB/MSB, ch 0 */
}

void __init pit_stop_interrupt(void)
{
	__pit_init(0, 0x30);		/* mode 0 */
}
void __init stop_timer_interrupt(void)
{
	char *name;

	if (vxtime.hpet_address) {
		name = "HPET";
		hpet_timer_stop_set_go(0);
	} else {
		name = "PIT";
		pit_stop_interrupt();
	}
	printk(KERN_INFO "timer: %s interrupt stopped.\n", name);
}
int __init time_setup(char *str)
{
	report_lost_ticks = 1;
	return 1;
}

static struct irqaction irq0 = {
	timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
};
void __init time_init(void)
{
	char *timename;
	char *gtod;

#ifdef HPET_HACK_ENABLE_DANGEROUS
	if (!vxtime.hpet_address) {
		printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
		       "manually!\n");
		outl(0x800038a0, 0xcf8);
		outl(0xff000001, 0xcfc);
		outl(0x800038a0, 0xcf8);
		vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
		printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
		       "at %#lx.\n", vxtime.hpet_address);
	}
#endif

	if (nohpet)
		vxtime.hpet_address = 0;

	xtime.tv_sec = get_cmos_time();
	xtime.tv_nsec = 0;

	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	if (!hpet_init())
		vxtime_hz = (1000000000000000L + hpet_period / 2) /
			hpet_period;
	else
		vxtime.hpet_address = 0;
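	/*
	 * Illustration (not from the original source): hpet_period is the
	 * HPET period in femtoseconds, so 1e15 / hpet_period is the HPET
	 * frequency in Hz; e.g. hpet_period = 69841279 fs gives
	 * vxtime_hz ~= 14318180 Hz, the classic 14.31818 MHz rate.
	 */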
	if (hpet_use_timer) {
		cpu_khz = hpet_calibrate_tsc();
		timename = "HPET";
#ifdef CONFIG_X86_PM_TIMER
	} else if (pmtmr_ioport && !vxtime.hpet_address) {
		vxtime_hz = PM_TIMER_FREQUENCY;
		timename = "PM";
		pit_init();
		cpu_khz = pit_calibrate_tsc();
#endif
	} else {
		pit_init();
		cpu_khz = pit_calibrate_tsc();
		timename = "PIT";
	}

	vxtime.mode = VXTIME_TSC;
	gtod = time_init_gtod();

	printk(KERN_INFO "time.c: Using %ld.%06ld MHz WALL %s GTOD %s timer.\n",
	       vxtime_hz / 1000000, vxtime_hz % 1000000, timename, gtod);
	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
	       cpu_khz / 1000, cpu_khz % 1000);

	vxtime.quot = (1000000L << 32) / vxtime_hz;
	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
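	/*
	 * Illustration (not from the original source): these are 32.32
	 * fixed-point reciprocals. With vxtime_hz = 14318180, vxtime.quot is
	 * about 0.0698 us per HPET clock scaled by 2^32; with a hypothetical
	 * 2 GHz CPU (cpu_khz = 2000000), vxtime.tsc_quot = (1000 << 32) /
	 * 2000000 ~= 2147483, i.e. 0.0005 us (0.5 ns) per TSC cycle.
	 */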
	vxtime.last_tsc = get_cycles_sync();
	setup_irq(0, &irq0);

	set_cyc2ns_scale(cpu_khz);
}
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (oem_force_hpet_timer())
		return 1;

	/* Intel systems are normally all synchronized. Exceptions
	   are handled in the OEM check above. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return 0;

	/* Assume multi socket systems are not synchronized */
	return num_present_cpus() > 1;
}
/*
 * Decide what mode gettimeofday should use.
 */
__init static char *time_init_gtod(void)
{
	char *timetype;

	if (unsynchronized_tsc())
		notsc = 1;

	if (vxtime.hpet_address && notsc) {
		timetype = hpet_use_timer ? "HPET" : "PIT/HPET";
		if (hpet_use_timer)
			vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
		else
			vxtime.last = hpet_readl(HPET_COUNTER);
		vxtime.mode = VXTIME_HPET;
		do_gettimeoffset = do_gettimeoffset_hpet;
#ifdef CONFIG_X86_PM_TIMER
	/* Using PM for gettimeofday is quite slow, but we have no other
	   choice because the TSC is too unreliable on some systems. */
	} else if (pmtmr_ioport && !vxtime.hpet_address && notsc) {
		timetype = "PM";
		do_gettimeoffset = do_gettimeoffset_pm;
		vxtime.mode = VXTIME_PMTMR;
		sysctl_vsyscall = 0;
		printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n");
#endif
	} else {
		timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC";
		vxtime.mode = VXTIME_TSC;
	}

	return timetype;
}

__setup("report_lost_ticks", time_setup);
static long clock_cmos_diff;
static unsigned long sleep_start;

/*
 * sysfs support for the timer.
 */

static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
	/*
	 * Estimate time zone so that set_time can update the clock
	 */
	long cmos_time = get_cmos_time();

	clock_cmos_diff = -cmos_time;
	clock_cmos_diff += get_seconds();
	sleep_start = cmos_time;
	return 0;
}
static int timer_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long sec;
	unsigned long ctime = get_cmos_time();
	unsigned long sleep_length = (ctime - sleep_start) * HZ;

	if (vxtime.hpet_address)
		hpet_reenable();
	else
		i8254_timer_resume();

	sec = ctime + clock_cmos_diff;
	write_seqlock_irqsave(&xtime_lock, flags);
	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	if (vxtime.mode == VXTIME_HPET) {
		if (hpet_use_timer)
			vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
		else
			vxtime.last = hpet_readl(HPET_COUNTER);
#ifdef CONFIG_X86_PM_TIMER
	} else if (vxtime.mode == VXTIME_PMTMR) {
		pmtimer_resume();
#endif
	} else
		vxtime.last_tsc = get_cycles_sync();
	write_sequnlock_irqrestore(&xtime_lock, flags);
	jiffies += sleep_length;
	wall_jiffies += sleep_length;
	monotonic_base += sleep_length * (NSEC_PER_SEC/HZ);
	touch_softlockup_watchdog();
	return 0;
}
static struct sysdev_class timer_sysclass = {
	.resume = timer_resume,
	.suspend = timer_suspend,
	set_kset_name("timer"),
};

/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
	.cls = &timer_sysclass,
};

static int time_init_device(void)
{
	int error = sysdev_class_register(&timer_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(time_init_device);
#ifdef CONFIG_HPET_EMULATE_RTC
/* HPET in LegacyReplacement Mode eats up RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
 *    is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64 Hz or user specified periodic
 * frequency, whichever is higher.
 */
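/*
 * Illustration (not from the original source): the emulation below advances
 * the timer 1 comparator by hpet_tick * HZ / hpet_rtc_int_freq HPET clocks
 * per interrupt. Assuming HZ = 250 and hpet_tick ~= 57273 (14.31818 MHz HPET),
 * a 64 Hz emulated RTC advances the comparator by about 223722 clocks, i.e.
 * one interrupt every ~15.6 ms.
 */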
#include <linux/rtc.h>

#define DEFAULT_RTC_INT_FREQ	64
#define RTC_NUM_INTS		1
static unsigned long UIE_on;
static unsigned long prev_update_sec;

static unsigned long AIE_on;
static struct rtc_time alarm_time;

static unsigned long PIE_on;
static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
static unsigned long PIE_count;

static unsigned long hpet_rtc_int_freq;	/* RTC interrupt frequency */
static unsigned int hpet_t1_cmp;	/* cached comparator register */

int is_hpet_enabled(void)
{
	return vxtime.hpet_address != 0;
}
/*
 * Timer 1 for RTC. We do not use the periodic interrupt feature,
 * even if HPET supports periodic interrupts on Timer 1.
 * The reason being, to set up a periodic interrupt in HPET, we need to
 * stop the main counter. And if we do that every time someone disables/enables
 * RTC, we will have an adverse effect on the main kernel timer running on Timer 0.
 * So, for the time being, simulate the periodic interrupt in software.
 *
 * hpet_rtc_timer_init() is called for the first time and during subsequent
 * interrupts reinit happens through hpet_rtc_timer_reinit().
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;
	/*
	 * Set the counter 1 and enable the interrupts.
	 */
	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	local_irq_save(flags);
	cnt = hpet_readl(HPET_COUNTER);
	cnt += ((hpet_tick * HZ) / hpet_rtc_int_freq);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;
	local_irq_restore(flags);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	return 1;
}
static void hpet_rtc_timer_reinit(void)
{
	unsigned int cfg, cnt;

	if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
		cfg = hpet_readl(HPET_T1_CFG);
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_T1_CFG);
		return;
	}

	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	/* It is more accurate to use the comparator value than current count. */
	cnt = hpet_t1_cmp;
	cnt += hpet_tick * HZ / hpet_rtc_int_freq;
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;
}
/*
 * The functions below are called from rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	if (bit_mask & RTC_UIE)
		UIE_on = 0;
	if (bit_mask & RTC_PIE)
		PIE_on = 0;
	if (bit_mask & RTC_AIE)
		AIE_on = 0;

	return 1;
}

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	int timer_init_reqd = 0;

	if (!is_hpet_enabled())
		return 0;

	if (!(PIE_on | AIE_on | UIE_on))
		timer_init_reqd = 1;

	if (bit_mask & RTC_UIE) {
		UIE_on = 1;
	}
	if (bit_mask & RTC_PIE) {
		PIE_on = 1;
		PIE_count = 0;
	}
	if (bit_mask & RTC_AIE) {
		AIE_on = 1;
	}

	if (timer_init_reqd)
		hpet_rtc_timer_init();

	return 1;
}
int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	alarm_time.tm_hour = hrs;
	alarm_time.tm_min = min;
	alarm_time.tm_sec = sec;

	return 1;
}

int hpet_set_periodic_freq(unsigned long freq)
{
	if (!is_hpet_enabled())
		return 0;

	PIE_freq = freq;
	return 1;
}

int hpet_rtc_dropped_irq(void)
{
	if (!is_hpet_enabled())
		return 0;

	return 1;
}
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;
	int call_rtc_interrupt = 0;

	hpet_rtc_timer_reinit();

	if (UIE_on | AIE_on) {
		rtc_get_rtc_time(&curr_time);
	}

	if (UIE_on) {
		if (curr_time.tm_sec != prev_update_sec) {
			/* Set update int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag = RTC_UF;
			prev_update_sec = curr_time.tm_sec;
		}
	}

	if (PIE_on) {
		PIE_count++;
		if (PIE_count >= hpet_rtc_int_freq / PIE_freq) {
			/* Set periodic int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_PF;
			PIE_count = 0;
		}
	}

	if (AIE_on) {
		if ((curr_time.tm_sec == alarm_time.tm_sec) &&
		    (curr_time.tm_min == alarm_time.tm_min) &&
		    (curr_time.tm_hour == alarm_time.tm_hour)) {
			/* Set alarm int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_AF;
		}
	}

	if (call_rtc_interrupt) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		rtc_interrupt(rtc_int_flag, dev_id, regs);
	}

	return IRQ_HANDLED;
}
static int __init nohpet_setup(char *s)
{
	nohpet = 1;
	return 1;
}

__setup("nohpet", nohpet_setup);

int __init notsc_setup(char *s)
{
	notsc = 1;
	return 1;
}

__setup("notsc", notsc_setup);