diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index d57ca5c540..db7f77eacf 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -4,7 +4,15 @@
 #include <linux/mm.h>
 #include <asm/processor.h>
 
-#define __flush_tlb() \
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define __flush_tlb() __native_flush_tlb()
+#define __flush_tlb_global() __native_flush_tlb_global()
+#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
+#endif
+
+#define __native_flush_tlb() \
 	do { \
 		unsigned int tmpreg; \
 		\
@@ -19,7 +27,7 @@
  * Global pages have to be flushed a bit differently. Not a real
  * performance problem because this does not happen often.
  */
-#define __flush_tlb_global() \
+#define __native_flush_tlb_global() \
 	do { \
 		unsigned int tmpreg, cr4, cr4_orig; \
 		\
@@ -36,7 +44,8 @@
 		: "memory"); \
 	} while (0)
 
-extern unsigned long pgkern_mask;
+#define __native_flush_tlb_single(addr) \
+	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
 
 # define __flush_tlb_all() \
 	do { \
@@ -48,9 +57,6 @@ extern unsigned long pgkern_mask;
 
 #define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
 
-#define __flush_tlb_single(addr) \
-	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
-
 #ifdef CONFIG_X86_INVLPG
 # define __flush_tlb_one(addr) __flush_tlb_single(addr)
 #else
@@ -73,11 +79,15 @@ extern unsigned long pgkern_mask;
  *  - flush_tlb_range(vma, start, end) flushes a range of pages
  *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
  *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
  * and page-granular flushes are available only on i486 and up.
  */
 
+#define TLB_FLUSH_ALL	0xffffffff
+
+
 #ifndef CONFIG_SMP
 
 #define flush_tlb() __flush_tlb()
@@ -104,7 +114,12 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 		__flush_tlb();
 }
 
-#else
+static inline void native_flush_tlb_others(const cpumask_t *cpumask,
+					   struct mm_struct *mm, unsigned long va)
+{
+}
+
+#else /* SMP */
 
 #include <asm/smp.h>
 
@@ -123,6 +138,9 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st
	flush_tlb_mm(vma->vm_mm);
 }
 
+void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
+			     unsigned long va);
+
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
 
@@ -133,8 +151,11 @@ struct tlb_state
 	char __cacheline_padding[L1_CACHE_BYTES-8];
 };
 DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+#endif /* SMP */
 
-
+#ifndef CONFIG_PARAVIRT
+#define flush_tlb_others(mask, mm, va) \
+	native_flush_tlb_others(&mask, mm, va)
 #endif
 
 #define flush_tlb_kernel_range(start, end) flush_tlb_all()
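
A note for readers following the hunks: the patch's core move is an indirection layer. The raw flush implementations keep their code but are renamed __native_flush_tlb(), __native_flush_tlb_global() and __native_flush_tlb_single(), and the short __flush_tlb*() names either alias them directly (the !CONFIG_PARAVIRT case above) or come from <asm/paravirt.h>, where a hypervisor back end can substitute hypercalls. Here is a minimal user-space sketch of the same pattern; the names native_flush, hypervisor_flush and flush_op are invented for illustration and appear nowhere in the patch:

#include <stdio.h>

/* the "native" path: what bare hardware would do */
static void native_flush(void)
{
	puts("native: reload CR3 to drop non-global TLB entries");
}

/* a paravirt back end: what a hypervisor-aware kernel might do instead */
static void hypervisor_flush(void)
{
	puts("paravirt: ask the hypervisor to flush via a hypercall");
}

/* the indirection point; paravirt setup repoints this at boot */
static void (*flush_op)(void) = native_flush;

#define __flush_tlb() flush_op()

int main(void)
{
	__flush_tlb();			/* native by default */
	flush_op = hypervisor_flush;	/* what CONFIG_PARAVIRT does, in spirit */
	__flush_tlb();
	return 0;
}

The real mechanism routes through the paravirt ops structure rather than a bare function pointer, but the shape is the same: one compile-time name, one runtime-selected implementation.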
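
Why the global flush needs its own macro: a CR3 write does not invalidate TLB entries for pages marked global (PGE), so __native_flush_tlb_global() must clear CR4.PGE, let the write flush everything, and restore the original value. Below is a hedged user-space simulation of just the bit manipulation involved; cr4 here is an ordinary variable standing in for the control register, its initial contents are made up, and only X86_CR4_PGE (bit 7) is the architectural value:

#include <stdio.h>

#define X86_CR4_PGE (1u << 7)	/* CR4.PGE, global-page enable (bit 7) */

static unsigned int cr4 = 0x00000631u | X86_CR4_PGE;	/* made-up contents */

int main(void)
{
	unsigned int cr4_orig = cr4;	/* save, as the macro does */

	/* on real hardware, writing CR4 with PGE cleared flushes the
	 * entire TLB, global entries included */
	cr4 &= ~X86_CR4_PGE;
	printf("CR4 with PGE cleared: %#010x\n", cr4);

	cr4 = cr4_orig;			/* restore the saved value */
	printf("CR4 restored:         %#010x\n", cr4);
	return 0;
}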
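
The other new surface is flush_tlb_others(): an empty inline stub on UP (there are no other cpus to shoot down), an out-of-line declaration on SMP (the IPI-based implementation lives in the SMP code), and replaceable as a unit under CONFIG_PARAVIRT so a hypervisor can batch remote flushes instead of sending interrupts. TLB_FLUSH_ALL (0xffffffff) serves as a sentinel "virtual address" meaning flush everything rather than one page. A hedged sketch of how a receiving cpu might interpret that sentinel; handle_flush_request is an invented name, not the kernel's actual handler:

#include <stdio.h>

#define TLB_FLUSH_ALL	0xffffffffUL	/* sentinel: not a usable page va */

/* hypothetical receiver: pick a full flush or a single-page invlpg */
static void handle_flush_request(unsigned long va)
{
	if (va == TLB_FLUSH_ALL)
		puts("full flush (reload CR3)");
	else
		printf("single-page flush (invlpg %#010lx)\n", va);
}

int main(void)
{
	handle_flush_request(0x08048000UL);	/* one page */
	handle_flush_request(TLB_FLUSH_ALL);	/* the whole TLB */
	return 0;
}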