diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 8cb708a6be..e6a4723f0e 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -15,6 +15,7 @@
 #include <asm/processor.h>
 #include <asm/fixmap.h>
 #include <linux/threads.h>
+#include <asm/paravirt.h>
 
 #ifndef _I386_BITOPS_H
 #include <asm/bitops.h>
@@ -34,14 +35,14 @@ struct vm_area_struct;
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 extern unsigned long empty_zero_page[1024];
 extern pgd_t swapper_pg_dir[1024];
-extern kmem_cache_t *pgd_cache;
-extern kmem_cache_t *pmd_cache;
+extern struct kmem_cache *pgd_cache;
+extern struct kmem_cache *pmd_cache;
 extern spinlock_t pgd_lock;
 extern struct page *pgd_list;
 
-void pmd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_dtor(void *, kmem_cache_t *, unsigned long);
+void pmd_ctor(void *, struct kmem_cache *, unsigned long);
+void pgd_ctor(void *, struct kmem_cache *, unsigned long);
+void pgd_dtor(void *, struct kmem_cache *, unsigned long);
 void pgtable_cache_init(void);
 void paging_init(void);
 
@@ -246,6 +247,24 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 # include <asm/pgtable-2level.h>
 #endif
 
+#ifndef CONFIG_PARAVIRT
+/*
+ * Rules for using pte_update - it must be called after any PTE update which
+ * has not been done using the set_pte / clear_pte interfaces.  It is used by
+ * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel
+ * PTE updates should either be sets, clears, or set_pte_atomic for P->P
+ * transitions, which means this hook should only be called for user PTEs.
+ * This hook implies a P->P protection or access change has taken place,
+ * which requires a subsequent TLB flush.  The notification can optionally
+ * be delayed until the TLB flush event by using the pte_update_defer form
+ * of the interface, but care must be taken to ensure that the flush happens
+ * while still holding the same page table lock so that the shadow and
+ * primary pages do not become out of sync on SMP.
+ */
+#define pte_update(mm, addr, ptep)		do { } while (0)
+#define pte_update_defer(mm, addr, ptep)	do { } while (0)
+#endif
+
 /*
  * We only update the dirty/accessed state if we set
  * the dirty bit by hand in the kernel, since the hardware
@@ -258,6 +277,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 do {									\
 	if (dirty) {							\
 		(ptep)->pte_low = (entry).pte_low;			\
+		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
 		flush_tlb_page(vma, address);				\
 	}								\
 } while (0)
@@ -287,6 +307,7 @@ do {									\
 	__dirty = pte_dirty(*(ptep));					\
 	if (__dirty) {							\
 		clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low);		\
+		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
 		flush_tlb_page(vma, address);				\
 	}								\
 	__dirty;							\
@@ -299,11 +320,20 @@ do {									\
 	__young = pte_young(*(ptep));					\
 	if (__young) {							\
 		clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low);	\
+		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
 		flush_tlb_page(vma, address);				\
 	}								\
 	__young;							\
 })
 
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	pte_t pte = raw_ptep_get_and_clear(ptep);
+	pte_update(mm, addr, ptep);
+	return pte;
+}
+
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
 {
@@ -321,6 +351,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
+	pte_update(mm, addr, ptep);
 }
 
 /*
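
Note on the hook above: with CONFIG_PARAVIRT enabled, pte_update() and
pte_update_defer() are no longer the empty macros in this hunk but are
supplied via <asm/paravirt.h>, giving a shadow-paging hypervisor a chance
to resynchronize its shadow page tables after a direct PTE write. As a
rough illustration only, the sketch below shows the shape such a backend
hook could take; hv_queue_shadow_resync() is a hypothetical helper, not
an interface defined by this patch or by <asm/paravirt.h>.

/*
 * Hypothetical backend for the pte_update() hook declared above.
 * hv_queue_shadow_resync() stands in for whatever notification
 * primitive a real shadow-mode hypervisor port would provide.
 */
static void example_pte_update(struct mm_struct *mm, unsigned long addr,
			       pte_t *ptep)
{
	/*
	 * The guest PTE at addr has already been written in place, so
	 * queue the matching shadow PTE for resync.  Per the rules in
	 * the comment above, the caller still holds the page table
	 * lock and issues the TLB flush afterwards.
	 */
	hv_queue_shadow_resync(mm, addr, ptep);
}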