X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=include%2Fasm-x86%2Fsystem_32.h;h=db6283eb5e46d6f53ff24d33b0a5ec091e5a9fd0;hb=7462894a7cb03b54b9139f31fab5928366752a78;hp=d69ba937e09251769e2f00d54c0c91562a4127e8;hpb=547307420931344a868275bd7ea7a30f117a15a9;p=powerpc.git

diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index d69ba937e0..db6283eb5e 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -7,6 +7,7 @@
 #include 
 
 #ifdef __KERNEL__
+#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
 struct task_struct; /* one of the stranger aspects of C forward declarations.. */
 extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
@@ -141,7 +142,7 @@ static inline unsigned long native_read_cr4_safe(void)
 {
 	unsigned long val;
 	/* This could fault if %cr4 does not exist */
-	asm("1: movl %%cr4, %0 \n"
+	asm volatile("1: movl %%cr4, %0 \n"
 	    "2: \n"
 	    ".section __ex_table,\"a\" \n"
 	    ".long 1b,2b \n"
@@ -160,6 +161,10 @@ static inline void native_wbinvd(void)
 	asm volatile("wbinvd": : :"memory");
 }
 
+static inline void clflush(volatile void *__p)
+{
+	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+}
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
@@ -216,6 +221,7 @@ static inline unsigned long get_limit(unsigned long segment)
 
 #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
 #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
 
 /**
  * read_barrier_depends - Flush all pending reads that subsequents reads
@@ -271,18 +277,18 @@ static inline unsigned long get_limit(unsigned long segment)
 
 #define read_barrier_depends() do { } while(0)
 
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+# define smp_rmb() rmb()
+#else
+# define smp_rmb() barrier()
+#endif
 #ifdef CONFIG_X86_OOSTORE
-/* Actually there are no OOO store capable CPUs for now that do SSE,
-   but make it already an possibility. */
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+# define smp_wmb() wmb()
 #else
-#define wmb() __asm__ __volatile__ ("": : :"memory")
+# define smp_wmb() barrier()
 #endif
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
 #define smp_read_barrier_depends() read_barrier_depends()
 #define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
 #else
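
Context note (not part of the patch): with this change, mb()/rmb()/wmb() pick the mfence/lfence/sfence instructions on SSE-capable processors via alternative(), the new smp_* variants keep smp_mb() as a full fence under CONFIG_SMP while smp_rmb()/smp_wmb() drop to a compiler-only barrier() unless CONFIG_X86_PPRO_FENCE/CONFIG_X86_OOSTORE are set, and the new clflush() helper writes one cache line back to memory. Below is a minimal user-space sketch of what those primitives boil down to, assuming GCC-style inline asm and an x86 CPU with SSE2 and CLFLUSH; the demo_* names are invented for this illustration and are not kernel interfaces.

/*
 * Illustrative sketch only -- not kernel code. The demo_* functions
 * mirror what the macros in the patch expand to on an SSE2-capable CPU.
 */
#include <stdio.h>

static inline void demo_barrier(void)              /* barrier(): compiler-only reordering fence */
{
	asm volatile("" ::: "memory");
}

static inline void demo_mb(void)                   /* mb(): full memory fence */
{
	asm volatile("mfence" ::: "memory");
}

static inline void demo_wmb(void)                  /* wmb(): store fence */
{
	asm volatile("sfence" ::: "memory");
}

static inline void demo_clflush(volatile void *p)  /* same idea as the clflush() helper */
{
	asm volatile("clflush %0" : "+m" (*(volatile char *)p));
}

int main(void)
{
	static char buf[64];

	buf[0] = 1;          /* produce the data                        */
	demo_wmb();          /* order the store...                      */
	demo_clflush(buf);   /* ...before flushing its cache line       */
	demo_mb();           /* wait until the flush is globally visible */

	printf("flushed cache line at %p\n", (void *)buf);
	return 0;
}

Splitting smp_wmb() from wmb() this way means UP kernels, and SMP kernels on processors that never reorder stores, pay only for the compiler-only barrier shown in demo_barrier(), while code that must order writes against DMA or cache flushes still gets a real sfence from wmb().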