1 #ifndef __ASM_CRIS_SYSTEM_H
2 #define __ASM_CRIS_SYSTEM_H
4 #include <linux/config.h>
6 #include <asm/segment.h>
8 /* the switch_to macro calls resume, an asm function in entry.S which does the actual task switch */
/* resume() is implemented in assembler (entry.S, per the comment above);
 * the third argument is the byte offset of the `thread` member inside
 * struct task_struct, computed with the null-pointer offsetof idiom below. */
12 extern struct task_struct *resume(struct task_struct *prev, struct task_struct *next, int);
/* No per-architecture preparation is needed before a switch on this port. */
13 #define prepare_to_switch() do { } while(0)
/* Hand control to resume(); `last` receives the task we actually switched
 * away from.  NOTE(review): (int)&((struct task_struct *)0)->thread is a
 * hand-rolled offsetof(struct task_struct, thread). */
14 #define switch_to(prev,next,last) last = resume(prev,next, \
15 (int)&((struct task_struct *)0)->thread)
17 /* read the CPU PC register */
19 extern inline unsigned long rdpc(void)
/* Copy the program counter register ($pc) into `pc` and return it.
 * NOTE(review): the surrounding lines (opening brace, declaration of `pc`,
 * the return statement and closing brace) are missing from this dump --
 * only the asm statement survives here. */
22 __asm__ volatile ("move.d $pc,%0" : "=rm" (pc));
26 /* read the CPU version register */
28 extern inline unsigned long rdvr(void) {
/* Read the CPU version register ($vr) into `vr`.
 * NOTE(review): the declaration of `vr`, the return and the closing brace
 * are in lines missing from this dump. */
30 __asm__ volatile ("move $vr,%0" : "=rm" (vr));
34 /* read/write the user-mode stackpointer */
36 extern inline unsigned long rdusp(void) {
/* Read the user-mode stack pointer register ($usp) into `usp`.
 * NOTE(review): the declaration of `usp`, the return and the closing brace
 * are missing from this dump. */
38 __asm__ __volatile__("move $usp,%0" : "=rm" (usp));
/* Body of wrusp(usp): write `usp` back into the $usp register.
 * NOTE(review): the wrusp() function header itself is missing from this dump. */
43 __asm__ __volatile__("move %0,$usp" : /* no outputs */ : "rm" (usp))
45 /* read the current stackpointer */
47 extern inline unsigned long rdsp(void) {
/* Read the current (kernel) stack pointer ($sp) into `sp`.
 * NOTE(review): the declaration of `sp` and the return/closing brace are
 * missing from this dump. */
49 __asm__ __volatile__("move.d $sp,%0" : "=rm" (sp));
/* NOTE(review): only the prototype line of _get_base() survives in this
 * dump; its body is missing.  Presumably trivial on CRIS, which does not
 * use x86-style segment bases -- confirm against the full source. */
53 extern inline unsigned long _get_base(char * addr)
/* Single no-op instruction.  NOTE(review): the trailing semicolon makes
 * nop() expand to two statements, which breaks `if (x) nop(); else ...` --
 * kept as-is because callers may rely on the current expansion. */
58 #define nop() __asm__ __volatile__ ("nop");
/* Atomically exchange *ptr with x (atomicity via IRQ disable in __xchg
 * below), preserving the pointed-to type in the result. */
60 #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
/* test-and-set: store 1 at *ptr, return the previous value. */
61 #define tas(ptr) (xchg((ptr),1))
/* Oversized dummy type so an "m" asm constraint on *__xg(ptr) is treated
 * as covering the whole object, keeping the compiler from caching it in a
 * register across the asm. */
63 struct __xchg_dummy { unsigned long a[100]; };
64 #define __xg(x) ((struct __xchg_dummy *)(x))
66 #ifdef CONFIG_ETRAX_DEBUG_INTERRUPT
68 /* use these and an oscilloscope to see the fraction of time we're running with IRQ's disabled */
69 /* it assumes the LED's are on port 0x90000000 of course. */
/* sti: enable interrupts (ei), then write 0 to the LED port, preserving
 * $r0 around the access. */
70 #define sti() __asm__ __volatile__ ( "ei\n\tpush $r0\n\tmoveq 0,$r0\n\tmove.d $r0,[0x90000000]\n\tpop $r0" );
/* cli: disable interrupts (di), then write 0x40000 to the LED port so the
 * IRQ-off period is visible on a scope. */
71 #define cli() __asm__ __volatile__ ( "di\n\tpush $r0\n\tmove.d 0x40000,$r0\n\tmove.d $r0,[0x90000000]\n\tpop $r0");
/* save_flags: snapshot $ccr (which contains the interrupt-enable bit) into x. */
72 #define save_flags(x) __asm__ __volatile__ ("move $ccr,%0" : "=rm" (x) : : "memory");
/* restore_flags: write x back to $ccr; btstq 5 tests the I-bit and bpl
 * skips the LED write when it is clear, so the LED port is cleared only
 * when interrupts are being re-enabled. */
73 #define restore_flags(x) __asm__ __volatile__ ("move %0,$ccr\n\tbtstq 5,%0\n\tbpl 1f\n\tnop\n\tpush $r0\n\tmoveq 0,$r0\n\tmove.d $r0,[0x90000000]\n\tpop $r0\n1:\n" : : "r" (x) : "memory");
76 /* Log when interrupts are turned on and off and who did it. */
/* Bit 5 of $ccr is the interrupt-enable (I) bit. */
77 #define CCR_EI_MASK (1 << 5)
/* Ring-buffer state and trigger positions for the interrupt on/off log;
 * defined elsewhere in the kernel. */
79 extern int log_int_pos;
80 extern int log_int_size;
81 extern int log_int_enable;
82 extern int log_int_trig0_pos;
83 extern int log_int_trig1_pos;
84 extern void log_int(unsigned long pc, unsigned long prev_ccr, unsigned long next_ccr);
86 /* If you only want to log changes - change the 1 to a 0 below */
/* As written (the `1 ||`) every call is logged; with 0 it would log only
 * when the I-bit actually toggles between curr_ccr and next_ccr.
 * NOTE(review): the macro's closing "} while(0)" line is missing from this
 * dump. */
87 #define LOG_INT(pc, curr_ccr, next_ccr) do { \
88 if (1 || (curr_ccr ^ next_ccr) & CCR_EI_MASK) \
89 log_int((pc), curr_ccr, next_ccr); \
/* Snapshot $ccr into x.  NOTE(review): the trailing semicolon in this
 * macro is suspect in expression contexts, but is preserved as-is. */
92 #define __save_flags(x) __asm__ __volatile__ ("move $ccr,%0" : "=rm" (x) : : "memory");
/* Logging variant of cli: record caller PC and current $ccr via LOG_INT,
 * then disable interrupts with `di`.
 * NOTE(review): braces and closing lines of these three functions are
 * missing from this dump. */
94 extern inline void __cli(void)
96 unsigned long pc = rdpc();
97 unsigned long curr_ccr; __save_flags(curr_ccr);
98 LOG_INT(pc, curr_ccr, 0);
99 __asm__ __volatile__ ( "di" : : :"memory");
/* Logging variant of sti: log the transition, then enable interrupts. */
103 extern inline void __sti(void)
105 unsigned long pc = rdpc();
106 unsigned long curr_ccr; __save_flags(curr_ccr);
107 LOG_INT(pc, curr_ccr, CCR_EI_MASK);
108 __asm__ __volatile__ ( "ei" : : :"memory");
/* Logging variant of restore_flags: log the transition, then write x back
 * into $ccr. */
111 extern inline void __restore_flags(unsigned long x)
113 unsigned long pc = rdpc();
114 unsigned long curr_ccr; __save_flags(curr_ccr);
115 LOG_INT(pc, curr_ccr, x);
116 __asm__ __volatile__ ("move %0,$ccr" : : "rm" (x) : "memory");
119 /* For spinlocks etc */
/* Snapshot flags, then disable interrupts (both via the logging variants
 * above in this CONFIG_ETRAX_DEBUG_INTERRUPT arm). */
120 #define local_irq_save(x) do { __save_flags(x); __cli(); }while (0)
121 #define local_irq_restore(x) restore_flags(x)
123 #define local_irq_disable() cli()
124 #define local_irq_enable() sti()
/* Plain (non-logging) interrupt primitives.  NOTE(review): these appear
 * to be the other arm of the CONFIG_ETRAX_DEBUG_INTERRUPT conditional;
 * the #else directive itself is missing from this dump.  The trailing
 * semicolons in these macros are preserved as-is. */
129 #define __cli() __asm__ __volatile__ ( "di" : : :"memory");
130 #define __sti() __asm__ __volatile__ ( "ei" : : :"memory");
131 #define __save_flags(x) __asm__ __volatile__ ("move $ccr,%0" : "=rm" (x) : : "memory");
132 #define __restore_flags(x) __asm__ __volatile__ ("move %0,$ccr" : : "rm" (x) : "memory");
134 /* For spinlocks etc */
/* Read $ccr and disable interrupts in a single asm, leaving no window
 * between the snapshot and the `di`. */
135 #define local_irq_save(x) __asm__ __volatile__ ("move $ccr,%0\n\tdi" : "=rm" (x) : : "memory");
/* Like local_irq_save but *enables* interrupts after the snapshot. */
136 #define local_irq_set(x) __asm__ __volatile__ ("move $ccr,%0\n\tei" : "=rm" (x) : : "memory");
137 #define local_irq_restore(x) restore_flags(x)
139 #define local_irq_disable() cli()
140 #define local_irq_enable() sti()
/* Map the generic names onto whichever __-variant (logging or plain) was
 * selected by the conditional above. */
144 #define cli() __cli()
145 #define sti() __sti()
146 #define save_flags(x) __save_flags(x)
147 #define restore_flags(x) __restore_flags(x)
/* Snapshot flags then force interrupts off / on, respectively. */
148 #define save_and_cli(x) do { save_flags(x); cli(); } while(0)
149 #define save_and_sti(x) do { save_flags(x); sti(); } while(0)
151 extern inline unsigned long __xchg(unsigned long x, void * ptr, int size)
/* Exchange x with *ptr for size 1, 2 or 4.  ETRAX has no atomic exchange
 * instruction, so atomicity comes from disabling interrupts around plain
 * loads/stores (uniprocessor-only; not SMP-safe).
 * NOTE(review): large parts of this function -- braces, the switch on
 * `size`, the asm instruction templates, the default case and the final
 * return -- are missing from this dump; the comments below cover only
 * what is visible. */
153 /* since Etrax doesn't have any atomic xchg instructions, we need to disable
154 irq's (if enabled) and do it with move.d's */
157 save_flags(flags); /* save flags, including irq enable bit */
158 cli(); /* shut off irq's */
/* Three per-size asm variants; only the output/input constraint lines
 * survive here.  Each takes the memory operand via __xg(ptr) so the
 * compiler treats the whole object as touched. */
161 __asm__ __volatile__ (
166 : "m" (*__xg(ptr)), "r" (x)
170 __asm__ __volatile__ (
175 : "m" (*__xg(ptr)), "r" (x)
179 __asm__ __volatile__ (
184 : "m" (*__xg(ptr)), "r" (x)
188 restore_flags(flags); /* restore irq enable bit */
/* Alternative plain-C implementation (presumably the other arm of a
 * preprocessor conditional whose directives are missing from this dump):
 * swap through a temporary with IRQs disabled. */
191 unsigned long flags,temp;
192 save_flags(flags); /* save flags, including irq enable bit */
193 cli(); /* shut off irq's */
196 *((unsigned char *)&temp) = x;
197 x = *(unsigned char *)ptr;
198 *(unsigned char *)ptr = *((unsigned char *)&temp);
201 *((unsigned short *)&temp) = x;
202 x = *(unsigned short *)ptr;
203 *(unsigned short *)ptr = *((unsigned short *)&temp);
207 x = *(unsigned long *)ptr;
208 *(unsigned long *)ptr = temp;
211 restore_flags(flags); /* restore irq enable bit */
/* Compiler barrier only: CRIS (UP, in-order) needs no hardware fence, so
 * mb() just stops the compiler reordering memory accesses across it.
 * NOTE(review): the rmb()/wmb() definitions (original lines 217-218) are
 * missing from this dump but are referenced below. */
216 #define mb() __asm__ __volatile__ ("" : : : "memory")
219 #define set_mb(var, value) do { var = value; mb(); } while (0)
220 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
/* NOTE(review): the two smp_* groups below are presumably the CONFIG_SMP
 * and !CONFIG_SMP arms of an #ifdef whose directive lines are missing
 * from this dump; as shown they would be conflicting redefinitions. */
223 #define smp_mb() mb()
224 #define smp_rmb() rmb()
225 #define smp_wmb() wmb()
227 #define smp_mb() barrier()
228 #define smp_rmb() barrier()
229 #define smp_wmb() barrier()
235 /* disable hlt during certain critical i/o operations */
/* Advertise the disable_hlt/enable_hlt interface; implementations live
 * elsewhere in the port. */
237 #define HAVE_DISABLE_HLT
238 void disable_hlt(void);
239 void enable_hlt(void);