#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>

#ifdef __KERNEL__

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define prepare_to_switch() do {} while(0)

#define __STR(x) #x
#define STR(x) __STR(x)

#define __PUSH(x) "pushq %%" __STR(x) "\n\t"
#define __POP(x) "popq %%" __STR(x) "\n\t"
struct save_context_frame {
	/* mirrors the push order in SAVE_CONTEXT below,
	   lowest address (the saved %rsp) first */
	unsigned long rbp;
	unsigned long rbx;
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long rcx;
	unsigned long rdx;
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long rdi;
	unsigned long rsi;
};

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT \
	__PUSH(rsi) __PUSH(rdi) \
	__PUSH(r12) __PUSH(r13) __PUSH(r14) __PUSH(r15) \
	__PUSH(rdx) __PUSH(rcx) __PUSH(r8) __PUSH(r9) __PUSH(r10) __PUSH(r11) \
	__PUSH(rbx) __PUSH(rbp)
#define RESTORE_CONTEXT \
	__POP(rbp) __POP(rbx) \
	__POP(r11) __POP(r10) __POP(r9) __POP(r8) __POP(rcx) __POP(rdx) \
	__POP(r15) __POP(r14) __POP(r13) __POP(r12) \
	__POP(rdi) __POP(rsi)
#define switch_to(prev,next,last) do { void *l; \
	asm volatile(SAVE_CONTEXT \
		     "movq %%rsp,%0\n\t" /* save RSP */ \
		     "movq %3,%%rsp\n\t" /* restore RSP */ \
		     "leaq thread_return(%%rip),%%rax\n\t" \
		     "movq %%rax,%1\n\t" /* save RIP */ \
		     "pushq %4\n\t" /* setup new RIP */ \
		     "jmp __switch_to\n\t" \
		     ".globl thread_return\n" \
		     "thread_return:\n\t" \
		     RESTORE_CONTEXT \
		     :"=m" (prev->thread.rsp), "=m" (prev->thread.rip), "=a" (l) \
		     :"m" (next->thread.rsp), "m" (next->thread.rip), \
		      "S" (next), "D" (prev) \
		     :"memory"); \
	last = l; \
} while(0)
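
/*
 * Usage sketch (illustrative, not part of the original header): the
 * scheduler would switch stacks with something like
 *
 *	switch_to(prev, next, prev);
 *
 * Control leaves on the old stack through "jmp __switch_to" and
 * resumes at thread_return on the new task's stack; the third
 * argument is then set from whatever __switch_to left in %rax.
 */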
extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value) \
	asm volatile("\n" \
		"1:\t" "movl %0,%%" #seg "\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:\t" "movl %1,%%" #seg "\n\t" \
		"jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n\t" \
		".align 8\n\t" ".quad 1b,3b\n" \
		".previous" \
		: :"r" ((int)(value)), "r" (0))
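
/*
 * Usage sketch (illustrative): e.g. loadsegment(fs, 0) clears %fs.
 * If a bad selector faults, the .fixup/__ex_table entries above catch
 * the exception and load the null selector instead of oopsing.
 */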
#define set_debug(value,register) \
	__asm__("movq %0,%%db" #register \
		: /* no output */ \
		:"r" ((unsigned long) value))
/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
	unsigned long __dummy; \
	__asm__( \
		"movq %%cr0,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})

#define write_cr0(x) \
	__asm__("movq %0,%%cr0": :"r" (x))

#define read_cr4() ({ \
	unsigned long __dummy; \
	__asm__( \
		"movq %%cr4,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})

#define write_cr4(x) \
	__asm__("movq %0,%%cr4": :"r" (x))

#define stts() write_cr0(8 | read_cr0())
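
/*
 * Usage sketch (illustrative): the 8 above is CR0.TS (bit 3).  Lazy
 * FPU handling sets TS with stts() when switching away from an FPU
 * user, so the next FPU instruction traps; the trap handler calls
 * clts() before restoring the FPU state for the new task.
 */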
#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")
#endif /* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

#define __xg(x) ((volatile long *)(x))

/* a plain aligned 64-bit store is atomic on x86-64 */
extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;
}

#define _set_64bit set_64bit
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the "volatile" attribute is necessary;
 *	   without it the primitive would be invalid, since *ptr is really
 *	   an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %k0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 8:
		__asm__ __volatile__("xchgq %0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	}
	return x;
}
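
/*
 * Usage sketch (illustrative, not part of the original header):
 * tas() above is xchg(ptr,1); a hypothetical test-and-set spin
 * built on it:
 */
static inline void __example_spin_on(volatile unsigned long *lock)
{
	while (xchg(lock, 1))	/* grab the lock; old value 0 means it was free */
		while (*lock)
			;	/* spin read-only until it looks free again */
}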
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 8:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	}
	return old;
}
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
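
/*
 * Usage sketch (illustrative, not part of the original header):
 * cmpxchg() succeeded when its return value equals 'old'.  A
 * hypothetical lock-free increment:
 */
static inline void __example_atomic_inc(volatile unsigned long *ctr)
{
	unsigned long old;
	do {
		old = *ctr;	/* snapshot the current value */
	} while (cmpxchg(ctr, old, old + 1) != old);
}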
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * On x86-64 mb(), rmb() and wmb() map directly to the mfence, lfence
 * and sfence instructions.  Intel CPUs so far follow what Intel calls
 * "Processor Order", in which all writes are seen in the program order
 * even outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence":::"memory")
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
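
/*
 * Usage sketch (illustrative, hypothetical variables): a producer
 * publishes data before a flag with smp_wmb(); the consumer pairs
 * it with smp_rmb() so the data read cannot pass the flag read.
 */
static inline void __example_publish(unsigned long *data,
				     volatile unsigned long *flag,
				     unsigned long value)
{
	*data = value;
	smp_wmb();		/* data becomes visible before the flag */
	*flag = 1;
}

static inline unsigned long __example_consume(unsigned long *data,
					      volatile unsigned long *flag)
{
	while (!*flag)
		;		/* wait for the producer to set the flag */
	smp_rmb();		/* order the flag read before the data read */
	return *data;
}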
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
/* interrupt control.. */
#define __save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define __restore_flags(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
#define __cli() __asm__ __volatile__("cli": : :"memory")
#define __sti() __asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti holds interrupts off until after the next
   instruction completes, so "sti; hlt" cannot lose a wakeup interrupt */
#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
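
/*
 * Usage sketch (illustrative, hypothetical flag): because sti only
 * takes effect after the following instruction, the idle loop can
 * test for work with interrupts off and still not miss a wakeup:
 *
 *	__cli();
 *	if (!work_pending)
 *		safe_halt();	(sti; hlt - the IRQ wakes us from hlt)
 *	else
 *		__sti();
 */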
#define __save_and_cli(x) do { __save_flags(x); __cli(); } while(0)
#define __save_and_sti(x) do { __save_flags(x); __sti(); } while(0)

/* For spinlocks etc */
#define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_set(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_set \n\t pushfq ; popq %0 ; sti":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_restore(x) __asm__ __volatile__("# local_irq_restore \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory")
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()
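
/*
 * Usage sketch (illustrative, hypothetical counter): a short section
 * protected against local interrupts only.
 */
static inline void __example_irq_safe_inc(unsigned long *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* pushfq ; popq flags ; cli */
	(*counter)++;			/* no local interrupt can intervene */
	local_irq_restore(flags);	/* pushq flags ; popfq */
}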
#ifdef CONFIG_SMP

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)
#define save_and_cli(x) do { save_flags(x); cli(); } while(0)
#define save_and_sti(x) do { save_flags(x); sti(); } while(0)
#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)
#define save_and_sti(x) __save_and_sti(x)

#endif
/* Default simics "magic" breakpoint */
#define icebp() asm volatile("xchg %%bx,%%bx" ::: "ebx")
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
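
/*
 * Usage sketch (illustrative): bracket I/O that must not see the CPU
 * halted, e.g.
 *
 *	disable_hlt();
 *	... poll or reprogram the device ...
 *	enable_hlt();
 */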