#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <asm/asm.h>

#include <linux/kernel.h>

#ifdef CONFIG_X86_32
# include "system_32.h"
#else
# include "system_64.h"
#endif

#ifdef __KERNEL__
#define _set_base(addr, base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while (0)

#define _set_limit(addr, limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while (0)

#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %k1, %%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		_ASM_ALIGN "\n\t"		\
		_ASM_PTR " 1b,3b\n"		\
		".previous"			\
		: :"r" (value), "r" (0))
/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
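/*
 * Usage sketch (illustrative only, not part of the original header): a
 * context switch might save the outgoing task's user %gs and reload the
 * incoming one's; if the new selector turns out to be bad, the fixup in
 * loadsegment() above silently loads the null selector instead.  "prev"
 * and "next" are hypothetical thread-state pointers used only here:
 *
 *	savesegment(gs, prev->gs);
 *	if (next->gs != prev->gs)
 *		loadsegment(gs, next->gs);
 */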
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__ ("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit + 1;
}
static inline void native_clts(void)
{
	asm volatile ("clts");
}
/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads and stores around it, which can hurt performance. Solution is to
 * use a variable and mimic reads and writes to it to enforce serialization
 */
static unsigned long __force_order;
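/*
 * Illustrative sketch of the hazard (hypothetical caller, not part of
 * this header): without the artificial __force_order operand the
 * compiler could, at least in principle, reorder
 *
 *	native_write_cr3(new_cr3);
 *	cr3 = native_read_cr3();
 *
 * since neither asm statement clobbers memory.  Because every accessor
 * below also names __force_order as an input or output, the two calls
 * carry an artificial dependency, which keeps the compiler from
 * swapping them.
 */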
static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}
static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. In x86_64, a cr4 always
	 * exists, so it will never fail. */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0		\n"
		     "2:			\n"
		     ".section __ex_table,\"a\"	\n"
		     ".long 1b,2b		\n"
		     ".previous			\n"
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}
static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
}
static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */

#define stts() write_cr0(8 | read_cr0())
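/*
 * Usage sketch (illustrative only): CR0.TS (bit 3, hence the 8 above)
 * drives lazy FPU context switching.  stts() sets TS so the next FPU
 * instruction raises a device-not-available fault; the fault handler can
 * then clts() and restore the task's FPU state before resuming it:
 *
 *	stts();		during the context switch
 *	...		task later touches the FPU and faults with #NM
 *	clts();		#NM handler makes the FPU usable again
 */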
#endif /* __KERNEL__ */
static inline void clflush(void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
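/*
 * Usage sketch (illustrative only): clflush evicts a single cache line,
 * so flushing a whole buffer means walking it in line-sized steps with a
 * fence on either side.  "clflush_size" stands in for the CPU's cache
 * line size (boot_cpu_data.x86_clflush_size elsewhere in the kernel):
 *
 *	void *p;
 *	mb();
 *	for (p = start; p < end; p += clflush_size)
 *		clflush(p);
 *	mb();
 */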
#define nop() __asm__ __volatile__ ("nop")

void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPU's follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPU's to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
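/*
 * Usage sketch (illustrative only, not from this header): the classic
 * producer/consumer pairing.  The producer must make its payload visible
 * before publishing the flag, and the consumer must not touch the
 * payload until it has seen the flag:
 *
 *	producer:			consumer:
 *	data = 42;			while (!flag)
 *	wmb();					cpu_relax();
 *	flag = 1;			rmb();
 *					BUG_ON(data != 42);
 */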
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
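/*
 * Usage sketch (illustrative only): set_mb() is meant for the
 * "store new state, then test the condition" pattern, e.g. the sleeping
 * side of a sleep/wakeup handshake, where the store must be ordered
 * before the subsequent load:
 *
 *	set_mb(current->state, TASK_INTERRUPTIBLE);
 *	if (!condition_is_true())
 *		schedule();
 *
 * "condition_is_true()" is a placeholder for whatever the waker sets.
 */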