#ifndef _ASM_X86_SPECIAL_INSNS_H
#define _ASM_X86_SPECIAL_INSNS_H

#ifdef __KERNEL__

#include <asm/nops.h>
static inline void native_clts(void)
{
	asm volatile("clts");
}
/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
extern unsigned long __force_order;
static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}
static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}
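/*
 * Illustrative sketch, not part of this header's API: because the
 * accessors above share the fake __force_order operand, a back-to-back
 * read and write of CR0 cannot be reordered by the compiler, so a
 * read-modify-write like the hypothetical helper below stays in
 * program order.
 */
static inline void __example_cr0_set_bits(unsigned long mask)
{
	/* hypothetical helper; the read is ordered before the write */
	native_write_cr0(native_read_cr0() | mask);
}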
static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}
static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}
static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}
static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}
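/*
 * Illustrative sketch, hypothetical helper: writing CR3 back with its
 * current value flushes all non-global TLB entries, which is the idea
 * behind the kernel's native TLB flush.
 */
static inline void __example_flush_tlb(void)
{
	/* reload CR3 to invalidate non-global TLB entries */
	native_write_cr3(native_read_cr3());
}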
static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}
static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
#ifdef CONFIG_X86_32
	/*
	 * This could fault if %cr4 does not exist. On x86_64, CR4 always
	 * exists, so it will never fail.
	 */
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}
static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}
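/*
 * Illustrative sketch with a hypothetical helper: CR4 feature bits are
 * enabled by read-modify-write; real kernel code goes through
 * cr4_set_bits(), which also maintains a per-cpu shadow of CR4.
 */
static inline void __example_cr4_set_bits(unsigned long mask)
{
	unsigned long cr4 = native_read_cr4();

	if ((cr4 | mask) != cr4)	/* skip a redundant, serializing write */
		native_write_cr4(cr4 | mask);
}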
#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}
static inline void native_write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline u32 __read_pkru(void)
{
	u32 ecx = 0;
	u32 edx, pkru;

	/*
	 * "rdpkru" instruction.  Places PKRU contents into EAX,
	 * clears EDX and requires that ecx=0.
	 */
	asm volatile(".byte 0x0f,0x01,0xee\n\t"
		     : "=a" (pkru), "=d" (edx)
		     : "c" (ecx));
	return pkru;
}
#else
static inline u32 __read_pkru(void)
{
	return 0;
}
#endif
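/*
 * Illustrative sketch, hypothetical helpers: PKRU holds two bits per
 * protection key, access-disable (AD) in the low bit and write-disable
 * (WD) in the high bit of each 2-bit field, so key N is described by
 * bits 2*N and 2*N+1 of the value returned by __read_pkru().
 */
static inline int __example_pkey_access_disabled(u32 pkru, int pkey)
{
	return !!(pkru & (1u << (pkey * 2)));		/* AD bit */
}

static inline int __example_pkey_write_disabled(u32 pkru, int pkey)
{
	return !!(pkru & (1u << (pkey * 2 + 1)));	/* WD bit */
}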
static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}
extern asmlinkage void native_load_gs_index(unsigned);
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
static inline unsigned long read_cr0(void)
{
	return native_read_cr0();
}
static inline void write_cr0(unsigned long x)
{
	native_write_cr0(x);
}
static inline unsigned long read_cr2(void)
{
	return native_read_cr2();
}
static inline void write_cr2(unsigned long x)
{
	native_write_cr2(x);
}
static inline unsigned long read_cr3(void)
{
	return native_read_cr3();
}
static inline void write_cr3(unsigned long x)
{
	native_write_cr3(x);
}
static inline unsigned long __read_cr4(void)
{
	return native_read_cr4();
}
static inline unsigned long __read_cr4_safe(void)
{
	return native_read_cr4_safe();
}
static inline void __write_cr4(unsigned long x)
{
	native_write_cr4(x);
}
static inline void wbinvd(void)
{
	native_wbinvd();
}
#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return native_read_cr8();
}
static inline void write_cr8(unsigned long x)
{
	native_write_cr8(x);
}
#endif
static inline void load_gs_index(unsigned selector)
{
	native_load_gs_index(selector);
}
/* Clear the 'TS' bit */
static inline void clts(void)
{
	native_clts();
}
#endif /* CONFIG_PARAVIRT */
#define stts() write_cr0(read_cr0() | X86_CR0_TS)
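/*
 * Illustrative sketch, hypothetical helper: under lazy FPU switching a
 * region that touches FPU state runs with CR0.TS cleared, and setting
 * TS again afterwards re-arms the device-not-available (#NM) fault.
 */
static inline void __example_fpu_region(void (*use_fpu_state)(void))
{
	clts();			/* clear CR0.TS so FPU insns don't fault */
	use_fpu_state();	/* ...code that touches FPU/SSE state... */
	stts();			/* set CR0.TS again to re-arm #NM */
}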
static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
static inline void clflushopt(volatile void *__p)
{
	alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
		       ".byte 0x66; clflush %P0",
		       X86_FEATURE_CLFLUSHOPT,
		       "+m" (*(volatile char __force *)__p));
}
static inline void clwb(volatile void *__p)
{
	volatile struct { char x[64]; } *p = __p;

	asm volatile(ALTERNATIVE_2(
		".byte " __stringify(NOP_DS_PREFIX) "; clflush (%[pax])",
		".byte 0x66; clflush (%[pax])",	/* clflushopt (%%rax) */
		X86_FEATURE_CLFLUSHOPT,
		".byte 0x66, 0x0f, 0xae, 0x30",	/* clwb (%%rax) */
		X86_FEATURE_CLWB)
		: [p] "+m" (*p)
		: [pax] "a" (p));
}
/**
 * pcommit_sfence() - persistent commit and fence
 *
 * The PCOMMIT instruction ensures that data that has been flushed from the
 * processor's cache hierarchy with CLWB, CLFLUSHOPT or CLFLUSH is accepted to
 * memory and is durable on the DIMM.  The primary use case for this is
 * persistent memory.
 *
 * This function shows how to properly use CLWB/CLFLUSHOPT/CLFLUSH and PCOMMIT
 * with appropriate fencing.
 *
 * Example:
 * void flush_and_commit_buffer(void *vaddr, unsigned int size)
 * {
 *	unsigned long clflush_mask = boot_cpu_data.x86_clflush_size - 1;
 *	void *vend = vaddr + size;
 *	void *p;
 *
 *	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
 *	     p < vend; p += boot_cpu_data.x86_clflush_size)
 *		clwb(p);
 *
 *	// SFENCE to order CLWB/CLFLUSHOPT/CLFLUSH cache flushes
 *	// MFENCE via mb() also works
 *	wmb();
 *
 *	// PCOMMIT and the required SFENCE for ordering
 *	pcommit_sfence();
 * }
 *
 * After this function completes the data pointed to by 'vaddr' has been
 * accepted to memory and will be durable if the 'vaddr' points to persistent
 * memory.
 *
 * PCOMMIT must always be ordered by an MFENCE or SFENCE, so to help simplify
 * things we include both the PCOMMIT and the required SFENCE in the
 * alternatives generated by pcommit_sfence().
 */
static inline void pcommit_sfence(void)
{
	alternative(ASM_NOP7,
		    ".byte 0x66, 0x0f, 0xae, 0xf8\n\t"	/* pcommit */
		    "sfence",
		    X86_FEATURE_PCOMMIT);
}
#define nop() asm volatile ("nop")
#endif /* __KERNEL__ */
#endif /* _ASM_X86_SPECIAL_INSNS_H */