2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * arch/sh64/kernel/time.c
8 * Copyright (C) 2000, 2001 Paolo Alberelli
10 * Original TMU/RTC code taken from sh version.
11 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
12 * Some code taken from i386 version.
13 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
16 #include <linux/config.h>
17 #include <linux/errno.h>
18 #include <linux/sched.h>
19 #include <linux/kernel.h>
20 #include <linux/param.h>
21 #include <linux/string.h>
23 #include <linux/interrupt.h>
24 #include <linux/time.h>
25 #include <linux/delay.h>
26 #include <linux/init.h>
27 #include <linux/smp.h>
29 #include <asm/registers.h> /* required by inline __asm__ stmt. */
31 #include <asm/processor.h>
32 #include <asm/uaccess.h>
35 #include <asm/delay.h>
37 #include <linux/timex.h>
38 #include <linux/irq.h>
39 #include <asm/hardware.h>
41 #define TMU_TOCR_INIT 0x00
42 #define TMU0_TCR_INIT 0x0020
43 #define TMU_TSTR_INIT 1
46 #define RCR1_CF 0x80 /* Carry Flag */
47 #define RCR1_CIE 0x10 /* Carry Interrupt Enable */
48 #define RCR1_AIE 0x08 /* Alarm Interrupt Enable */
49 #define RCR1_AF 0x01 /* Alarm Flag */
52 #define RCR2_PEF 0x80 /* PEriodic interrupt Flag */
53 #define RCR2_PESMASK 0x70 /* Periodic interrupt Set */
54 #define RCR2_RTCEN 0x08 /* ENable RTC */
55 #define RCR2_ADJ 0x04 /* ADJustment (30-second) */
56 #define RCR2_RESET 0x02 /* Reset bit */
57 #define RCR2_START 0x01 /* Start bit */
59 /* Clock, Power and Reset Controller */
60 #define CPRC_BLOCK_OFF 0x01010000
61 #define CPRC_BASE PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF
63 #define FRQCR (cprc_base+0x0)
64 #define WTCSR (cprc_base+0x0018)
65 #define STBCR (cprc_base+0x0030)
67 /* Time Management Unit */
68 #define TMU_BLOCK_OFF 0x01020000
69 #define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
70 #define TMU0_BASE tmu_base + 0x8 + (0xc * 0x0)
71 #define TMU1_BASE tmu_base + 0x8 + (0xc * 0x1)
72 #define TMU2_BASE tmu_base + 0x8 + (0xc * 0x2)
74 #define TMU_TOCR tmu_base+0x0 /* Byte access */
75 #define TMU_TSTR tmu_base+0x4 /* Byte access */
77 #define TMU0_TCOR TMU0_BASE+0x0 /* Long access */
78 #define TMU0_TCNT TMU0_BASE+0x4 /* Long access */
79 #define TMU0_TCR TMU0_BASE+0x8 /* Word access */
82 #define RTC_BLOCK_OFF 0x01040000
83 #define RTC_BASE PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF
85 #define R64CNT rtc_base+0x00
86 #define RSECCNT rtc_base+0x04
87 #define RMINCNT rtc_base+0x08
88 #define RHRCNT rtc_base+0x0c
89 #define RWKCNT rtc_base+0x10
90 #define RDAYCNT rtc_base+0x14
91 #define RMONCNT rtc_base+0x18
92 #define RYRCNT rtc_base+0x1c /* 16bit */
93 #define RSECAR rtc_base+0x20
94 #define RMINAR rtc_base+0x24
95 #define RHRAR rtc_base+0x28
96 #define RWKAR rtc_base+0x2c
97 #define RDAYAR rtc_base+0x30
98 #define RMONAR rtc_base+0x34
99 #define RCR1 rtc_base+0x38
100 #define RCR2 rtc_base+0x3c
103 #define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
107 #define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
110 extern rwlock_t xtime_lock;
111 #define TICK_SIZE tick
113 extern unsigned long wall_jiffies;
114 extern unsigned long volatile jiffies;
116 static unsigned long tmu_base, rtc_base;
117 unsigned long cprc_base;
119 /* Variables to allow interpolation of time of day to resolution better than a
122 /* This is effectively protected by xtime_lock */
123 static unsigned long ctc_last_interrupt;
124 static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */
126 #define CTC_JIFFY_SCALE_SHIFT 40
128 /* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
129 static unsigned long long scaled_recip_ctc_ticks_per_jiffy;
131 /* Estimate number of microseconds that have elapsed since the last timer tick,
132 by scaling the delta that has occured in the CTC register.
134 WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
135 the CPU clock rate. If the CPU sleeps, the CTC stops counting. Bear this
136 in mind if enabling SLEEP_WORKS in process.c. In that case, this algorithm
137 probably needs to use TMU.TCNT0 instead. This will work even if the CPU is
138 sleeping, though will be coarser.
140 FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
141 is running or if the freq or tick arguments of adjtimex are modified after
142 we have calibrated the scaling factor? This will result in either a jump at
143 the end of a tick period, or a wrap backwards at the start of the next one,
144 if the application is reading the time of day often enough. I think we
145 ought to do better than this. For this reason, usecs_per_jiffy is left
146 separated out in the calculation below. This allows some future hook into
147 the adjtime-related stuff in kernel/timer.c to remove this hazard.
/*
 * Estimate the microseconds elapsed since the last timer tick by scaling
 * the delta of the CTC (cr62) down-counter:
 *   usecs = ctc_delta * usecs_per_jiffy * scaled_recip_ctc_ticks_per_jiffy
 *           >> CTC_JIFFY_SCALE_SHIFT
 * Caller holds xtime_lock (see do_gettimeofday), which protects
 * ctc_last_interrupt.
 *
 * NOTE(review): the opening brace, the first asm's output constraint and
 * the final return statement are not visible in this view of the file;
 * only comments have been added here.
 */
static unsigned long usecs_since_tick(void)
	unsigned long long current_ctc;
	long ctc_ticks_since_interrupt;
	unsigned long long ull_ctc_ticks_since_interrupt;
	unsigned long result;

	/* Intermediate products of the 64-bit fixed-point multiply chain. */
	unsigned long long mul1_out;
	unsigned long long mul1_out_high;
	unsigned long long mul2_out_low, mul2_out_high;

	/* Read CTC register */
	asm ("getcon cr62, %0" : "=r" (current_ctc));
	/* Note, the CTC counts down on each CPU clock, not up.
	   Note(2), use long type to get correct wraparound arithmetic when
	   the counter crosses zero. */
	ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
	ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;

	/* Inline assembly to do 32x32x32->64 multiplier */
	asm volatile ("mulu.l %1, %2, %0" :
		"r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));

	mul1_out_high = mul1_out >> 32;

	/* Scale the low and high halves separately by the fixed-point
	   reciprocal of ctc_ticks_per_jiffy; recombined below. */
	asm volatile ("mulu.l %1, %2, %0" :
		"=r" (mul2_out_low) :
		"r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));

	asm volatile ("mulu.l %1, %2, %0" :
		"=r" (mul2_out_high) :
		"r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));

	/* Undo the fixed-point scaling to get plain microseconds. */
	result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);
/*
 * Fill *tv with the current wall-clock time, interpolating between
 * timer ticks via usecs_since_tick() and compensating for ticks the
 * scheduler has seen but not yet folded into xtime (wall_jiffies lag).
 *
 * NOTE(review): the declaration of `flags`, the braces around the
 * lost-jiffies block and the trailing carry/store code are not visible
 * in this view; only comments have been added.
 */
void do_gettimeofday(struct timeval *tv)
	unsigned long usec, sec;

	read_lock_irqsave(&xtime_lock, flags);
	usec = usecs_since_tick();
		/* Ticks not yet applied to xtime by the timer bottom half. */
		unsigned long lost = jiffies - wall_jiffies;
		usec += lost * (1000000 / HZ);
	usec += xtime.tv_usec;
	read_unlock_irqrestore(&xtime_lock, flags);

	/* Carry whole seconds out of the microsecond field. */
	while (usec >= 1000000) {
/*
 * Set the wall-clock time from *tv and reset the kernel NTP state so
 * the clock is treated as unsynchronised until userspace resyncs it.
 *
 * NOTE(review): the lines that actually store into xtime are not
 * visible in this view; only comments have been added.
 */
void do_settimeofday(struct timeval *tv)
	write_lock_irq(&xtime_lock);
	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;	/* clock no longer NTP-synchronised */
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_unlock_irq(&xtime_lock);
/*
 * Write the minutes/seconds of `nowtime` (seconds since the epoch) back
 * to the hardware RTC.  Only minutes and seconds are touched, so hour /
 * time-zone fields are left alone; this requires the RTC to be within
 * 15 minutes of the true time (see comment below).
 *
 * NOTE(review): the failure-path printk opener and the return
 * statements are not visible in this view of the file.
 */
static int set_rtc_time(unsigned long nowtime)
	int real_seconds, real_minutes, cmos_minutes;

	ctrl_outb(RCR2_RESET, RCR2);  /* Reset pre-scaler & stop RTC */

	cmos_minutes = ctrl_inb(RMINCNT);
	BCD_TO_BIN(cmos_minutes);

	/*
	 * since we're only adjusting minutes and seconds,
	 * don't interfere with hour overflow. This avoids
	 * messing with unknown time zones but requires your
	 * RTC not to be off by more than 15 minutes
	 */
	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */

	/* Only write if the drift is small enough to fix safely. */
	if (abs(real_minutes - cmos_minutes) < 30) {
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		ctrl_outb(real_seconds, RSECCNT);
		ctrl_outb(real_minutes, RMINCNT);
		/* (continuation of the error printk; opener not visible) */
		"set_rtc_time: can't update from %d to %d\n",
		cmos_minutes, real_minutes);

	ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start RTC */
/* xtime.tv_sec value at which the RTC was last synced from the system
   clock; do_timer_interrupt() backs this off by 600s on a failed sync
   so that a retry happens ~60s later. */
static long last_rtc_update = 0;
/*
 * Fold an interrupted kernel PC into the flat profiling histogram in
 * prof_buffer.  Called from the timer interrupt path for
 * non-user-mode samples only (see do_timer_interrupt).
 *
 * NOTE(review): several lines (early-return statements, scaling of pc
 * before indexing) are not visible in this view of the file.
 */
static inline void sh64_do_profile(unsigned long pc)
	/* Don't profile cpu_idle.. */
	if (!prof_buffer || !current->pid)

	/* Make pc relative to the start of kernel text. */
	pc -= (unsigned long) &_stext;

	/*
	 * Don't ignore out-of-bounds PC values silently, put them into the
	 * last histogram slot, so if present, they will show up as a sharp
	 */
	if (pc > prof_len - 1)

	/* We could just be sloppy and not lock against a re-entry on this
	   increment, but the profiling code won't always be linked in anyway. */
	atomic_inc((atomic_t *)&prof_buffer[pc]);
295 * timer_interrupt() needs to keep up the real-time clock,
296 * as well as call the "do_timer()" routine every clocktick
/*
 * Per-tick work: snapshot the CTC for sub-tick interpolation, profile
 * kernel-mode samples, and periodically write the system time back to
 * the hardware RTC.  Caller (timer_interrupt) holds xtime_lock for
 * writing.
 *
 * NOTE(review): do_timer()/heartbeat call sites and several braces are
 * not visible in this view of the file.
 */
static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	unsigned long long current_ctc;

	/* Snapshot the CTC down-counter so usecs_since_tick() can
	   interpolate within this tick. */
	asm ("getcon cr62, %0" : "=r" (current_ctc));
	ctc_last_interrupt = (unsigned long) current_ctc;

	/* Only profile kernel-mode PCs. */
	if (!user_mode(regs))
		sh64_do_profile(regs->pc);

#ifdef CONFIG_HEARTBEAT
	extern void heartbeat(void);

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 */
	if ((time_status & STA_UNSYNC) == 0 &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    xtime.tv_usec >= 500000 - ((unsigned) tick) / 2 &&
	    xtime.tv_usec <= 500000 + ((unsigned) tick) / 2) {
		if (set_rtc_time(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
			last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
332 * This is the same as the above, except we _also_ save the current
333 * Time Stamp Counter value at the time of the timer interrupt, so that
334 * we later on can estimate the time of day more exactly.
/*
 * This is the same as the above, except we _also_ save the current
 * Time Stamp Counter value at the time of the timer interrupt, so that
 * we later on can estimate the time of day more exactly.
 */
static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	unsigned long timer_status;

	/* Acknowledge the tick by clearing bit 0x100 in TMU0's control
	   register (presumably the underflow/interrupt flag — confirm
	   against the SH-5 TMU manual). */
	timer_status = ctrl_inw(TMU0_TCR);
	timer_status &= ~0x100;
	ctrl_outw(timer_status, TMU0_TCR);

	/*
	 * Here we are in the timer irq handler. We just have irqs locally
	 * disabled but we don't know if the timer_bh is running on the other
	 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
	 * the irq version of write_lock because as just said we have irq
	 * locally disabled. -arca
	 */
	write_lock(&xtime_lock);
	do_timer_interrupt(irq, NULL, regs);
	write_unlock(&xtime_lock);
/*
 * Read the current time from the hardware RTC and convert it to
 * seconds-since-epoch with mktime().  The register reads are guarded by
 * the RCR1 carry-flag protocol: if the carry flag is set after the
 * reads, a rollover occurred mid-read and the whole sequence is
 * retried, so a torn value can never be returned.  Implausible RTC
 * contents cause a reset to 1 Jan 2000.
 *
 * NOTE(review): the do{ opener, the BCD->binary conversions and the
 * yr100 computation are not visible in this view of the file.
 */
static unsigned long get_rtc_time(void)
	unsigned int sec, min, hr, wk, day, mon, yr, yr100;

		ctrl_outb(0, RCR1);  /* Clear CF-bit */
		sec = ctrl_inb(RSECCNT);
		min = ctrl_inb(RMINCNT);
		hr = ctrl_inb(RHRCNT);
		wk = ctrl_inb(RWKCNT);
		day = ctrl_inb(RDAYCNT);
		mon = ctrl_inb(RMONCNT);
		yr = ctrl_inw(RYRCNT);		/* 16-bit year register */
	} while ((ctrl_inb(RCR1) & RCR1_CF) != 0);	/* redo on mid-read carry */

	/* Sanity-check the (binary) values; reset the RTC if bogus. */
	if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
	    hr > 23 || min > 59 || sec > 59) {
		"SH RTC: invalid value, resetting to 1 Jan 2000\n");
		ctrl_outb(RCR2_RESET, RCR2);  /* Reset & Stop */
		ctrl_outb(0, RSECCNT);
		ctrl_outb(0, RMINCNT);
		ctrl_outb(0, RHRCNT);
		ctrl_outb(6, RWKCNT);		/* presumably 6 = Saturday (1 Jan 2000) — confirm encoding */
		ctrl_outb(1, RDAYCNT);
		ctrl_outb(1, RMONCNT);
		ctrl_outw(0x2000, RYRCNT);	/* BCD year 2000 */
		ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2);  /* Start */

	return mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
/*
 * Boot-time CPU clock calibration: start the CTC (cr62) down-counter,
 * busy-wait until the RTC carry interrupt fires (rtc_interrupt() sets
 * register r3 non-zero to break the loop), then derive the clock rate
 * from the number of CTC ticks consumed during the known RTC interval.
 *
 * NOTE(review): the definition of the __t0 macro, parts of the timing
 * comments, the declaration of `count` and the final return expression
 * are not visible in this view of the file — confirm against the full
 * source before relying on details here.
 */
static __init unsigned int get_cpu_mhz(void)
	unsigned long __dummy;
	unsigned long ctc_val_init, ctc_val;

	/*
	** Regardless the toolchain, force the compiler to use the
	** arbitrary register r3 as a clock tick counter.
	** NOTE: r3 must be in accordance with rtc_interrupt()
	*/
	register unsigned long long __rtc_irq_flag __asm__ ("r3");

	/* Align the measurement to a carry boundary: wait for the 64Hz
	   sub-second counter to read zero before enabling the interrupt. */
	do {} while (ctrl_inb(R64CNT) != 0);
	ctrl_outb(RCR1_CIE, RCR1);	/* Enable carry interrupt */

	/*
	 * r3 is arbitrary. CDC does not support "=z".
	 */
	ctc_val_init = 0xffffffff;
	ctc_val = ctc_val_init;

	/* Load the CTC, clear r3, then loop (beq/l against r63, the SH-5
	   zero register) until rtc_interrupt() sets r3; finally re-read
	   the CTC to capture the elapsed tick count. */
	asm volatile("gettr " __t0 ", %1\n\t"
		     "putcon %0, cr62\n\t"
		     "and %2, r63, %2\n\t"
		     "_pta 4, " __t0 "\n\t"
		     "beq/l %2, r63, " __t0 "\n\t"
		     "ptabs %1, " __t0 "\n\t"
		     "getcon cr62, %0\n\t"
		     : "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag)

	/*
	 * CPU clock = 4 stages * loop
	 * (if) pipe line stole
	 * CPU clock = 6 stages * loop
	 */

	/*
	 * Use CTC register to count. This approach returns the right value
	 * even if the I-cache is disabled (e.g. whilst debugging.)
	 */

	count = ctc_val_init - ctc_val; /* CTC counts down */

#if defined (CONFIG_SH_SIMULATOR)
	/*
	 * Let's pretend we are a 5MHz SH-5 to avoid a too
	 * little timer interval. Also to keep delay
	 * calibration within a reasonable time.
	 */

	/*
	 * This really is count by the number of clock cycles
	 * by the ratio between a complete R64CNT
	 * wrap-around (128) and CUI interrupt being raised (64).
	 */
/*
 * RTC carry-interrupt handler, used only during boot-time clock
 * calibration: it disables further carry interrupts and sets the
 * interrupted context's r3 (via the saved register file) to terminate
 * the spin loop in get_cpu_mhz().
 */
static void rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	ctrl_outb(0, RCR1);	/* Disable Carry Interrupts */
	regs->regs[3] = 1;	/* Using r3 */
/* Old-style positional irqaction initialisers (presumably handler,
   flags, mask, name, dev_id, next — confirm against this kernel's
   struct irqaction), installed by time_init() below. */
static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, 0, "timer", NULL, NULL};
static struct irqaction irq1 = { rtc_interrupt, SA_INTERRUPT, 0, "rtc", NULL, NULL};
/*
 * Boot-time timekeeping setup: remap the TMU/RTC/CPRC register blocks,
 * seed xtime from the RTC, install the timer and RTC interrupt
 * handlers, calibrate the CPU clock, derive the master/bus/module
 * clocks from the FRQCR divider fields, and finally program TMU0 as the
 * periodic HZ tick source.
 *
 * NOTE(review): several lines (panic-branch closers, the declaration of
 * `bfc` which is assigned below, the master-clock printk) are not
 * visible in this view of the file — confirm `bfc` is declared in the
 * full source.
 */
void __init time_init(void)
	unsigned int cpu_clock, master_clock, bus_clock, module_clock;
	unsigned long interval;
	unsigned long frqcr, ifc, pfc;
	static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
#define bfc_table ifc_table	/* Same */
#define pfc_table ifc_table	/* Same */

	tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
	if (tmu_base == 0UL) {
		panic("Unable to remap TMU\n");

	rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
	if (rtc_base == 0UL) {
		panic("Unable to remap RTC\n");

	cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC");
	if (cprc_base == 0UL) {
		panic("Unable to remap CPRC\n");

	/* Seed the software clock from the battery-backed RTC. */
	xtime.tv_sec = get_rtc_time();

	setup_irq(TIMER_IRQ, &irq0);
	setup_irq(RTC_IRQ, &irq1);

	/* Check how fast it is.. */
	cpu_clock = get_cpu_mhz();

	/* FIXME : Are these divides OK? Note careful order of operations to
	 * maintain reasonable precision and avoid overflow. */
	scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ));

	/* The RTC carry interrupt was only needed for calibration. */
	disable_irq(RTC_IRQ);

	printk("CPU clock: %d.%02dMHz\n",
	       (cpu_clock / 1000000), (cpu_clock % 1000000)/10000);

	/* Decode the divider fields of FRQCR to derive the other clocks. */
	frqcr = ctrl_inl(FRQCR);
	ifc = ifc_table[(frqcr>> 6) & 0x0007];
	bfc = bfc_table[(frqcr>> 3) & 0x0007];
	pfc = pfc_table[(frqcr>> 12) & 0x0007];
	master_clock = cpu_clock * ifc;
	bus_clock = master_clock/bfc;

	printk("Bus clock: %d.%02dMHz\n",
	       (bus_clock/1000000), (bus_clock % 1000000)/10000);
	module_clock = master_clock/pfc;
	printk("Module clock: %d.%02dMHz\n",
	       (module_clock/1000000), (module_clock % 1000000)/10000);
	/* Tick reload value: module clock / 4 — presumably matches the
	   prescaler selected by TMU0_TCR_INIT; confirm vs the TMU manual. */
	interval = (module_clock/(HZ*4));

	printk("Interval = %ld\n", interval);

	current_cpu_data.cpu_clock = cpu_clock;
	current_cpu_data.master_clock = master_clock;
	current_cpu_data.bus_clock = bus_clock;
	current_cpu_data.module_clock = module_clock;

	/* Program TMU0 as the periodic tick source and start it. */
	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
	ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
	ctrl_outl(interval, TMU0_TCOR);
	ctrl_outl(interval, TMU0_TCNT);
	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
/*
 * Power the machine down into deep standby: disable the watchdog,
 * select deep standby in STBCR, optionally write "Halted. " to the
 * Cayman alphanumeric display, then execute the sleep instruction.
 * Reaching panic() means the sleep unexpectedly returned.
 *
 * NOTE(review): the declaration of `i`, loop/#endif closers and the
 * function's closing brace are not visible in this view of the file.
 */
void enter_deep_standby(void)
	/* Disable watchdog timer */
	ctrl_outl(0xa5000000, WTCSR);
	/* Configure deep standby on sleep */
	ctrl_outl(0x03, STBCR);

#ifdef CONFIG_SH_CAYMAN
	extern void mach_alphanum(int position, unsigned char value);
	extern void mach_alphanum_brightness(int setting);
	char halted[] = "Halted. ";

	mach_alphanum_brightness(6);	/* dimmest setting above off */
	for (i=0; i<8; i++) {
		mach_alphanum(i, halted[i]);

	/* Drain outstanding memory operations before sleeping. */
	asm __volatile__ ("synco");

	asm __volatile__ ("sleep");
	asm __volatile__ ("synci");
	/* nops after sleep — presumably pipeline padding; confirm against
	   the SH-5 core documentation. */
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	asm __volatile__ ("nop");
	panic("Unexpected wakeup!\n");