www.usr.com/support/gpl/USR9113_release1.0.tar.gz
[bcm963xx.git] / kernel / linux / arch / mips / brcm-boards / bcm963xx / time.c
1 /*
2 <:copyright-gpl
3  Copyright 2004 Broadcom Corp. All Rights Reserved.
4
5  This program is free software; you can distribute it and/or modify it
6  under the terms of the GNU General Public License (Version 2) as
7  published by the Free Software Foundation.
8
9  This program is distributed in the hope it will be useful, but WITHOUT
10  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  for more details.
13
14  You should have received a copy of the GNU General Public License along
15  with this program; if not, write to the Free Software Foundation, Inc.,
16  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
17 :>
18 */
19 /*
20  * Setup time for Broadcom 963xx MIPS boards
21  */
22
23 #include <linux/config.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/sched.h>
27 #include <linux/spinlock.h>
28 #include <linux/interrupt.h>
29 #include <linux/module.h>
30 #include <linux/time.h>
31 #include <linux/timex.h>
32
33 #include <asm/mipsregs.h>
34 #include <asm/ptrace.h>
35 #include <asm/div64.h>
36 #include <asm/time.h>
37
38 #include <bcm_map_part.h>
39 #include <bcm_intr.h>
40
unsigned long r4k_interval;     /* Amount to increment compare reg each time */
static unsigned long r4k_cur;   /* What counter should be at next timer irq */

/* Cycle counter value at the previous timer interrupt..
 * timerhi/timerlo together form a software 64-bit extension of the 32-bit
 * CP0 cycle counter; updated once per tick in brcm_timer_interrupt(). */
static unsigned int timerhi = 0, timerlo = 0;

/* Jiffies value as of the last bottom-half run; defined by the core kernel
 * timer code, used to detect a pending-but-unprocessed tick. */
extern volatile unsigned long wall_jiffies;

/* Optional board-specific timer routine, invoked on every tick from
 * brcm_timer_interrupt() when non-NULL. */
void (*board_timer_interrupt)(int irq, void *dev_id, struct pt_regs * regs);
51
/*
 * Acknowledge the CP0 timer interrupt by loading the next target value
 * into the compare register (writing c0_compare clears the pending bit).
 */
static inline void ack_r4ktimer(unsigned long next_compare)
{
        write_c0_compare(next_compare);
}
56
/*
 * There are a lot of conceptually broken versions of the MIPS timer interrupt
 * handler floating around.  This one is rather different, but the algorithm
 * is provably more robust.
 */
static irqreturn_t brcm_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        unsigned int count;

        /* Interval of zero means the timer was never programmed; silence
         * the compare interrupt and report the irq as not ours. */
        if (r4k_interval == 0)
                goto null;

        do {
                do_timer(regs);

                /* Give the optional board-specific hook a chance to run
                 * on every tick. */
                if (board_timer_interrupt)
                        board_timer_interrupt(irq, dev_id, regs);

                /* Schedule the next tick one interval further out. */
                r4k_cur += r4k_interval;
                ack_r4ktimer(r4k_cur);

        /* Re-read the cycle counter: if it has already passed the new
         * compare value (unsigned difference < 0x7fffffff means "count is
         * ahead of r4k_cur" in modular arithmetic), we lost a tick while
         * processing -- loop and account for it as well. */
        } while (((count = (unsigned long)read_c0_count())
                  - r4k_cur) < 0x7fffffff);

        if (!jiffies) {
                /*
                 * If jiffies has overflowed in this timer_interrupt we must
                 * update the timer[hi]/[lo] to make do_fast_gettimeoffset()
                 * quotient calc still valid. -arca
                 */
                timerhi = timerlo = 0;
        } else {
                /*
                 * The cycle counter is only 32 bit which is good for about
                 * a minute at current count rates of upto 150MHz or so.
                 */
                timerhi += (count < timerlo);   /* Wrap around */
                timerlo = count;
        }

        return IRQ_HANDLED;

null:
        ack_r4ktimer(0);
        return IRQ_NONE;
}
103
/*
 * irqaction registered for the CP0 timer interrupt.  SA_INTERRUPT runs the
 * handler with interrupts disabled.  dev_id is set to the handler address
 * itself -- NOTE(review): presumably just a unique non-NULL cookie for this
 * action; confirm against how this irq would be identified/freed.
 */
static struct irqaction brcm_timer_action = {
        .handler        = brcm_timer_interrupt,
        .flags          = SA_INTERRUPT,
        .mask           = CPU_MASK_NONE,
        .name           = "timer",
        .next           = NULL,
        .dev_id         = brcm_timer_interrupt,
};
112
113
114 void __init brcm_timer_setup(struct irqaction *irq)
115 {
116         r4k_cur = (read_c0_count() + r4k_interval);
117         write_c0_compare(r4k_cur);
118
119         /* we are using the cpu counter for timer interrupts */
120         irq->handler = no_action;     /* we use our own handler */
121         setup_irq(MIPS_TIMER_INT, &brcm_timer_action);
122         set_c0_status(IE_IRQ5);
123 }
124
125 #if 0
126 /* This is for machines which generate the exact clock. */
127 #define USECS_PER_JIFFY (1000000/HZ)
128 #define USECS_PER_JIFFY_FRAC (0x100000000*1000000/HZ&0xffffffff)
129
/* Out-of-line wrapper around the do_div64_32 macro: gcc 3.0.1 gets an
 * internal compiler error when two do_div64_32 expansions appear in one
 * function, so do_fast_gettimeoffset() calls through this function.
 * Computes *res = (high:low 64-bit value) / base. */
static void call_do_div64_32( unsigned long *res, unsigned int high,
    unsigned int low, unsigned long base )
{
    do_div64_32(*res, high, low, base);
}
135
/*
 * FIXME: Does playing with the RP bit in c0_status interfere with this code?
 */
/*
 * Microseconds elapsed since the last timer tick, derived from the CP0
 * cycle counter: caches a "usecs-per-cycle * 2^32" quotient (recomputed at
 * most once per jiffy) and multiplies it by the cycle delta since the tick.
 */
static unsigned long do_fast_gettimeoffset(void)
{
        u32 count;
        unsigned long res, tmp;

        /* Last jiffy when do_fast_gettimeoffset() was called. */
        static unsigned long last_jiffies=0;
        unsigned long quotient;

        /*
         * Cached "1/(clocks per usec)*2^32" value.
         * It has to be recalculated once each jiffy.
         */
        static unsigned long cached_quotient=0;

        tmp = jiffies;

        quotient = cached_quotient;

        /* Only refresh the quotient when jiffies has advanced. */
        if (tmp && last_jiffies != tmp) {
                last_jiffies = tmp;
#ifdef CONFIG_CPU_MIPS32
                if (last_jiffies != 0) {

                        unsigned long r0;
                        /* gcc 3.0.1 gets an internal compiler error if there are two
                         * do_div64_32 inline macros.  To work around this problem,
                         * do_div64_32 is called as a function.
                         */
                        /* r0 = average cycles per jiffy (64-bit counter / jiffies);
                         * quotient = (USECS_PER_JIFFY << 32) / r0. */
                        call_do_div64_32(&r0, timerhi, timerlo, tmp);
                        call_do_div64_32(&quotient, USECS_PER_JIFFY,
                                    USECS_PER_JIFFY_FRAC, r0);

                        cached_quotient = quotient;

                }
#else
                /* 64-bit-capable CPU path: same computation as above using
                 * ddivu directly -- NOTE(review): intended to mirror the
                 * MIPS32 branch; confirm before touching. */
                __asm__(".set\tnoreorder\n\t"
                        ".set\tnoat\n\t"
                        ".set\tmips3\n\t"
                        "lwu\t%0,%2\n\t"
                        "dsll32\t$1,%1,0\n\t"
                        "or\t$1,$1,%0\n\t"
                        "ddivu\t$0,$1,%3\n\t"
                        "mflo\t$1\n\t"
                        "dsll32\t%0,%4,0\n\t"
                        "nop\n\t"
                        "ddivu\t$0,%0,$1\n\t"
                        "mflo\t%0\n\t"
                        ".set\tmips0\n\t"
                        ".set\tat\n\t"
                        ".set\treorder"
                        :"=&r" (quotient)
                        :"r" (timerhi),
                         "m" (timerlo),
                         "r" (tmp),
                         "r" (USECS_PER_JIFFY)
                        :"$1");
                cached_quotient = quotient;
#endif
        }

        /* Get last timer tick in absolute kernel time */
        count = read_c0_count();

        /* .. relative to previous jiffy (32 bits is enough) */
        count -= timerlo;

        /* res = high 32 bits of (count * quotient), i.e. usecs since tick. */
        __asm__("multu\t%1,%2\n\t"
                "mfhi\t%0"
                :"=r" (res)
                :"r" (count),
                 "r" (quotient));

        /*
         * Due to possible jiffies inconsistencies, we need to check 
         * the result so that we'll get a timer that is monotonic.
         */
        if (res >= USECS_PER_JIFFY)
                res = USECS_PER_JIFFY-1;

        return res;
}
222
223 void do_gettimeofday(struct timeval *tv)
224 {
225         unsigned int flags;
226
227         read_lock_irqsave (&xtime_lock, flags);
228         tv->tv_sec = xtime.tv_sec;
229         tv->tv_usec = xtime.tv_nsec/1000;
230         tv->tv_usec += do_fast_gettimeoffset();
231
232         /*
233          * xtime is atomically updated in timer_bh. jiffies - wall_jiffies
234          * is nonzero if the timer bottom half hasnt executed yet.
235          */
236         if (jiffies - wall_jiffies)
237                 tv->tv_usec += USECS_PER_JIFFY;
238
239         read_unlock_irqrestore (&xtime_lock, flags);
240
241         if (tv->tv_usec >= 1000000) {
242                 tv->tv_usec -= 1000000;
243                 tv->tv_sec++;
244         }
245 }
246
247 EXPORT_SYMBOL(do_gettimeofday);
248
249 int do_settimeofday(struct timespec *tv)
250 {
251         write_lock_irq (&xtime_lock);
252
253         /* This is revolting. We need to set the xtime.tv_usec correctly.
254          * However, the value in this location is is value at the last tick.
255          * Discover what correction gettimeofday would have done, and then
256          * undo it!
257          */
258         tv->tv_nsec -= do_fast_gettimeoffset()*NSEC_PER_USEC;
259
260         if (tv->tv_nsec < 0) {
261                 tv->tv_nsec += 1000000*NSEC_PER_USEC;
262                 tv->tv_sec--;
263         }
264
265         xtime.tv_sec = tv->tv_sec;
266         xtime.tv_nsec = tv->tv_nsec;
267         time_adjust = 0;                /* stop active adjtime() */
268         time_status |= STA_UNSYNC;
269         time_maxerror = NTP_PHASE_LIMIT;
270         time_esterror = NTP_PHASE_LIMIT;
271
272         write_unlock_irq (&xtime_lock);
273 }
274
275 EXPORT_SYMBOL(do_settimeofday);
276
277 #endif