#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#ifdef __KERNEL__

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif
#define prepare_to_switch() do {} while(0)

#define __STR(x) #x
#define STR(x) __STR(x)
#define __PUSH(x) "pushq %%" __STR(x) "\n\t"
#define __POP(x) "popq %%" __STR(x) "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT \
	__PUSH(rsi) __PUSH(rdi) \
	__PUSH(r12) __PUSH(r13) __PUSH(r14) __PUSH(r15) \
	__PUSH(rdx) __PUSH(rcx) __PUSH(r8) __PUSH(r9) __PUSH(r10) __PUSH(r11) \
	__PUSH(rbx) __PUSH(rbp)
#define RESTORE_CONTEXT \
	__POP(rbp) __POP(rbx) \
	__POP(r11) __POP(r10) __POP(r9) __POP(r8) __POP(rcx) __POP(rdx) \
	__POP(r15) __POP(r14) __POP(r13) __POP(r12) \
	__POP(rdi) __POP(rsi)
#define switch_to(prev,next,last) do { void *l; \
	asm volatile(SAVE_CONTEXT \
		"movq %%rsp,%0\n\t"	/* save RSP */ \
		"movq %3,%%rsp\n\t"	/* restore RSP */ \
		"leaq 1f(%%rip),%%rax\n\t" \
		"movq %%rax,%1\n\t"	/* save RIP */ \
		"pushq %4\n\t"		/* setup new RIP */ \
		"jmp __switch_to\n\t" \
		"1:\n\t" \
		RESTORE_CONTEXT \
		:"=m" (prev->thread.rsp),"=m" (prev->thread.rip), "=a" (l) \
		:"m" (next->thread.rsp),"m" (next->thread.rip), \
		 "S" (next), "D" (prev) \
		:"memory"); \
	last = l; \
} while(0)
extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg,value) \
	asm volatile("\n" \
		"1:\t" "movl %0,%%" #seg "\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:\t" "pushq $0 ; popq %% " #seg "\n\t" \
		"jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n\t" \
		".align 8\n\t" ".quad 1b,3b\n" \
		".previous" \
		: :"r" ((int)(value)))
#define set_debug(value,register) \
	__asm__("movq %0,%%db" #register \
		: /* no output */ \
		:"r" ((unsigned long) value))
/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
	unsigned long __dummy; \
	__asm__("movq %%cr0,%0" :"=r" (__dummy)); \
	__dummy; \
})
#define write_cr0(x) \
	__asm__("movq %0,%%cr0": :"r" (x));
#define read_cr4() ({ \
	unsigned long __dummy; \
	__asm__("movq %%cr4,%0" :"=r" (__dummy)); \
	__dummy; \
})
#define write_cr4(x) \
	__asm__("movq %0,%%cr4": :"r" (x));
#define stts() write_cr0(8 | read_cr0())
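/*
 * Example (sketch): lazy FPU switching sets TS when scheduling an FPU
 * user out, so the next FPU instruction traps; the trap handler then
 * does clts() before restoring the FPU registers.
 *
 *	stts();		on context switch away from an FPU user
 *	...
 *	clts();		before restoring the FPU registers
 */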
#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory");

#endif /* __KERNEL__ */
#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
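/*
 * Example (illustrative only): tas() atomically stores 1 and returns the
 * old value, which is enough for a crude test-and-set lock; 'lock_word'
 * is a hypothetical variable.
 *
 *	static long lock_word;
 *
 *	while (tas(&lock_word))
 *		;			spin until the old value was 0
 *	...
 *	xchg(&lock_word, 0);		release
 */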
#define __xg(x) ((volatile long *)(x))
extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;	/* an aligned 64-bit store is atomic on x86-64 */
}
#define _set_64bit set_64bit
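/*
 * Example (sketch): publishing a whole 64-bit value in a single store,
 * e.g. when rewriting a page-table entry; 'pte' and 'new_pte' are
 * hypothetical unsigned longs.
 *
 *	set_64bit(&pte, new_pte);
 */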
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 * although strictly speaking the constraints are incomplete: *ptr is
 * really an output argument as well. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1: __asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 2: __asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 4: __asm__ __volatile__("xchgl %k0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 8: __asm__ __volatile__("xchgq %0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	}
	return x;
}
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1: __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 2: __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 4: __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 8: __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	}
	return old;
}
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
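/*
 * Example (illustrative sketch): the classic lock-free update loop,
 * retrying until no other CPU has modified the word in the meantime;
 * 'counter' is a hypothetical unsigned long.
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */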
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * Strictly speaking "wmb()" would not need to do anything, as all
 * Intel CPU's so far follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU; it is nevertheless a real sfence below.
 *
 * I expect future Intel CPU's to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence":::"memory")
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
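/*
 * Example (sketch): ordering a ready flag after its payload with wmb()
 * on the producer side and rmb() on the consumer side; 'data' and
 * 'flag' are hypothetical shared variables.
 *
 *	producer:	data = val; wmb(); flag = 1;
 *	consumer:	if (flag) { rmb(); use(data); }
 */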
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

/* interrupt control.. */
#define __save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define __restore_flags(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
#define __cli() __asm__ __volatile__("cli": : :"memory")
#define __sti() __asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti does not take effect until after the
   following instruction, so no interrupt can slip in between sti and hlt */
#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
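/*
 * Example (sketch): a stripped-down idle loop built on safe_halt();
 * need_resched() stands in for whatever "work pending?" check the
 * real idle loop uses.
 *
 *	for (;;) {
 *		__cli();
 *		if (!need_resched())
 *			safe_halt();
 *		else
 *			__sti();
 *	}
 */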
#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0)
#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0)
/* For spinlocks etc */
#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_set(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_set \n\t pushfq ; popq %0 ; sti":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_restore(x) __asm__ __volatile__("# local_irq_restore \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory")
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()
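/*
 * Example (illustrative only): protecting a short critical section
 * against local interrupts while preserving the caller's IRQ state.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... touch data shared with an interrupt handler ...
 *	local_irq_restore(flags);
 */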
#ifdef CONFIG_SMP

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);

#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)
#define save_and_cli(x) do { save_flags(x); cli(); } while(0)
#define save_and_sti(x) do { save_flags(x); sti(); } while(0)

#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)
#define save_and_sti(x) __save_and_sti(x)

#endif
/* Default Simics "magic" breakpoint */
#define icebp() asm volatile("xchg %%bx,%%bx" ::: "ebx")
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
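/*
 * Example (sketch): a driver brackets a timing-critical I/O sequence so
 * the idle loop cannot execute hlt while it is in progress; the body is
 * hypothetical.
 *
 *	disable_hlt();
 *	... perform the hlt-sensitive I/O ...
 *	enable_hlt();
 */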