linux-2.4.21-pre4.git: include/asm-mips/system.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 *
 * Changed set_except_vector declaration to allow return of previous
 * vector address value - necessary for "borrowing" vectors.
 *
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/config.h>
#include <asm/sgidefs.h>

#include <linux/kernel.h>

#include <asm/addrspace.h>
#include <asm/ptrace.h>

/*
 * __sti: enable interrupts.  The ori/xori pair sets the IE bit (bit 0)
 * of c0_status and clears the EXL, ERL and KSU fields (bits 1..4) using
 * only immediates, with $at as the sole scratch register.
 */
__asm__ (
        ".macro\t__sti\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1,$12\n\t"                      /* read c0_status */
        "ori\t$1,0x1f\n\t"                      /* set bits 0..4 */
        "xori\t$1,0x1e\n\t"                     /* clear bits 1..4, keep IE set */
        "mtc0\t$1,$12\n\t"
        ".set\tpop\n\t"
        ".endm");

extern __inline__ void
__sti(void)
{
        __asm__ __volatile__(
                "__sti"
                : /* no outputs */
                : /* no inputs */
                : "memory");
}

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
__asm__ (
        ".macro\t__cli\n\t"
        ".set\tpush\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1,$12\n\t"                      /* read c0_status */
        "ori\t$1,1\n\t"                         /* set IE ... */
        "xori\t$1,1\n\t"                        /* ... then clear it */
        ".set\tnoreorder\n\t"
        "mtc0\t$1,$12\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        ".set\tpop\n\t"
        ".endm");

extern __inline__ void
__cli(void)
{
        __asm__ __volatile__(
                "__cli"
                : /* no outputs */
                : /* no inputs */
                : "memory");
}

__asm__ (
        ".macro\t__save_flags flags\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        "mfc0\t\\flags, $12\n\t"
        ".set\tpop\n\t"
        ".endm");

#define __save_flags(x)                                                 \
__asm__ __volatile__(                                                   \
        "__save_flags %0"                                               \
        : "=r" (x))

__asm__ (
        ".macro\t__save_and_cli result\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t\\result, $12\n\t"               /* save old c0_status */
        "ori\t$1, \\result, 1\n\t"              /* set IE in a scratch copy ... */
        "xori\t$1, 1\n\t"                       /* ... then clear it */
        ".set\tnoreorder\n\t"
        "mtc0\t$1, $12\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        ".set\tpop\n\t"
        ".endm");

#define __save_and_cli(x)                                               \
__asm__ __volatile__(                                                   \
        "__save_and_cli\t%0"                                            \
        : "=r" (x)                                                      \
        : /* no inputs */                                               \
        : "memory")

#define __save_and_sti(x)       do { __save_flags(x); __sti(); } while(0)

__asm__(".macro\t__restore_flags flags\n\t"
        ".set\tnoreorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1, $12\n\t"                     /* current c0_status */
        "andi\t\\flags, 1\n\t"                  /* keep only IE from the saved flags */
        "ori\t$1, 1\n\t"                        /* clear IE in the current value ... */
        "xori\t$1, 1\n\t"
        "or\t\\flags, $1\n\t"                   /* ... and merge the saved IE back in */
        "mtc0\t\\flags, $12\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        ".set\tat\n\t"
        ".set\treorder\n\t"
        ".endm");

#define __restore_flags(flags)                                          \
do {                                                                    \
        unsigned long __tmp1;                                           \
                                                                        \
        __asm__ __volatile__(                                           \
                "__restore_flags\t%0"                                   \
                : "=r" (__tmp1)                                         \
                : "0" (flags)                                           \
                : "memory");                                            \
} while(0)

#ifdef CONFIG_SMP

extern void __global_sti(void);
extern void __global_cli(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#  define sti() __global_sti()
#  define cli() __global_cli()
#  define save_flags(x) do { x = __global_save_flags(); } while (0)
#  define restore_flags(x) __global_restore_flags(x)
#  define save_and_cli(x) do { save_flags(x); cli(); } while(0)
#  define save_and_sti(x) do { save_flags(x); sti(); } while(0)

#else /* Single processor */

#  define sti() __sti()
#  define cli() __cli()
#  define save_flags(x) __save_flags(x)
#  define save_and_cli(x) __save_and_cli(x)
#  define restore_flags(x) __restore_flags(x)
#  define save_and_sti(x) __save_and_sti(x)

#endif /* SMP */

/* For spinlocks etc */
#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_set(x)        __save_and_sti(x)
#define local_irq_restore(x)    __restore_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()

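/*
 * Usage sketch (illustrative only, not part of the original header):
 * code touching data shared with an interrupt handler brackets the
 * access like this; `pending_count` is a hypothetical shared counter.
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);          // disable local interrupts, remember state
 *      pending_count++;                // short critical section
 *      local_irq_restore(flags);       // put the saved IE bit back
 *
 * Interrupts stay off only between save and restore, so the section
 * must be short and must never sleep.
 */
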
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()                                \
        __asm__ __volatile__(                   \
                ".set   push\n\t"               \
                ".set   noreorder\n\t"          \
                ".set   mips2\n\t"              \
                "sync\n\t"                      \
                ".set   pop"                    \
                : /* no output */               \
                : /* no input */                \
                : "memory")
#else
#define __sync()        do { } while(0)
#endif

/*
 * __fast_iob() pushes pending stores out to the system bus by doing an
 * uncached load from KSEG1; the load cannot complete until the bus has
 * drained.
 */
#define __fast_iob()                            \
        __asm__ __volatile__(                   \
                ".set   push\n\t"               \
                ".set   noreorder\n\t"          \
                "lw     $0,%0\n\t"              \
                "nop\n\t"                       \
                ".set   pop"                    \
                : /* no output */               \
                : "m" (*(int *)KSEG1)           \
                : "memory")

#define fast_wmb()      __sync()
#define fast_rmb()      __sync()
#define fast_mb()       __sync()
#define fast_iob()                              \
        do {                                    \
                __sync();                       \
                __fast_iob();                   \
        } while (0)

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()           fast_wmb()
#define rmb()           fast_rmb()
#define mb()            wbflush()
#define iob()           wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()           fast_wmb()
#define rmb()           fast_rmb()
#define mb()            fast_mb()
#define iob()           fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#endif

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)

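/*
 * Usage sketch (illustrative, with a hypothetical data/ready pair):
 * the producer must order its payload store before the flag store,
 * and the consumer must order the flag load before the payload load:
 *
 *      // producer                     // consumer
 *      data = compute();               while (!ready)
 *      wmb();                                  ;
 *      ready = 1;                      rmb();
 *                                      consume(data);
 *
 * set_mb()/set_wmb() simply combine the store to `var` with the
 * corresponding barrier after it.
 */
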
#ifndef __ASSEMBLY__
/*
 * switch_to(n) should switch to task n, unless n is already the
 * current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next);
#endif /* !__ASSEMBLY__ */

#define prepare_to_switch()     do { } while(0)

struct task_struct;

extern asmlinkage void lazy_fpu_switch(void *);
extern asmlinkage void init_fpu(void);
extern asmlinkage void save_fp(struct task_struct *);
extern asmlinkage void restore_fp(struct task_struct *);

#define switch_to(prev,next,last) \
do { \
        (last) = resume(prev, next); \
} while(0)

/*
 * For 32 and 64 bit operands we can take advantage of ll and sc.
 * FIXME: This doesn't work for R3000 machines.
 */
extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
#ifdef CONFIG_CPU_HAS_LLSC
        unsigned long dummy;

        __asm__ __volatile__(
                ".set\tpush\t\t\t\t# xchg_u32\n\t"
                ".set\tnoreorder\n\t"
                ".set\tnomacro\n\t"
                "ll\t%0, %3\n"                  /* load-linked the old value */
                "1:\tmove\t%2, %z4\n\t"
                "sc\t%2, %1\n\t"                /* store-conditional the new one */
                "beqzl\t%2, 1b\n\t"             /* sc failed: retry; the branch- */
                " ll\t%0, %3\n\t"               /* likely delay slot reloads %0  */
                "sync\n\t"
                ".set\tpop"
                : "=&r" (val), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");

        return val;
#else
        unsigned long flags, retval;

        save_flags(flags);
        cli();
        retval = *m;
        *m = val;
        restore_flags(flags);   /* implies memory barrier  */
        return retval;
#endif /* Processor-dependent optimization */
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

static __inline__ unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
                case 4:
                        return xchg_u32(ptr, x);
        }
        /* Unsupported sizes fall through and return x unchanged.  */
        return x;
}

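/*
 * Usage sketch (illustrative, with a hypothetical lock word): tas()
 * is the classic test-and-set built on xchg(); spinning until the old
 * value comes back zero gives a minimal lock.
 *
 *      static volatile int lock_word;
 *
 *      while (tas(&lock_word))         // atomically set to 1, test old value
 *              ;                       // non-zero: someone else holds it
 *      ...critical section...
 *      lock_word = 0;                  // release
 */
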
extern void *set_except_vector(int n, void *addr);

extern void __die(const char *, struct pt_regs *, const char *file,
        const char *func, unsigned long line) __attribute__((noreturn));
extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
        const char *func, unsigned long line);

#define die(msg, regs)                                                  \
        __die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs)                                        \
        __die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)

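/*
 * Usage sketch (illustrative): a trap handler reporting a fatal
 * kernel-mode fault; file, function and line are filled in by the
 * macro itself.
 *
 *      die_if_kernel("Unhandled address error", regs);
 */
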
#endif /* _ASM_SYSTEM_H */