static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
+extern void vmalloc_sync_all(void);
+
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
*/
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
-#define paravirt_map_pt_hook(slot, va, pfn) do { } while (0)
-
-#define raw_ptep_get_and_clear(xp) native_ptep_get_and_clear(xp)
#endif
+/* local pte updates need not use xchg for locking */
+static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
+{
+	pte_t res = *ptep;
+
+	/* Pure native function needs no input for mm, addr */
+	native_pte_clear(NULL, 0, ptep);
+	return res;
+}
+
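For contrast, the cross-CPU-safe helper that ptep_get_and_clear() continues
to use does need an atomic exchange, so a concurrent hardware walker cannot
set the Dirty/Accessed bits between reading and clearing the entry. A minimal
sketch of the 2-level (non-PAE) form, roughly as it appears in
pgtable-2level.h:

static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	/* xchg is implicitly locked on x86, so read+clear is one atomic step */
	return __pte(xchg(&ptep->pte_low, 0));
}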
/*
* We only update the dirty/accessed state if we set
* the dirty bit by hand in the kernel, since the hardware
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
-	pte_t pte = raw_ptep_get_and_clear(ptep);
+	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}
{
	pte_t pte;
	if (full) {
-		pte = *ptep;
-		pte_clear(mm, addr, ptep);
+		/*
+		 * Full address destruction in progress; paravirt does not
+		 * care about updates and native needs no locking
+		 */
+		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
#endif
#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
-({ \
- pte_t *__ptep; \
- unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
- __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE0);\
- paravirt_map_pt_hook(KM_PTE0,__ptep, pfn); \
- __ptep = __ptep + pte_index(address); \
- __ptep; \
-})
-#define pte_offset_map_nested(dir, address) \
-({ \
- pte_t *__ptep; \
- unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
- __ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE1);\
- paravirt_map_pt_hook(KM_PTE1,__ptep, pfn); \
- __ptep = __ptep + pte_index(address); \
- __ptep; \
-})
+#define pte_offset_map(dir, address) \
+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
+#define pte_offset_map_nested(dir, address) \
+	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
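The kmap_atomic_pte() hook used above replaces the removed
paravirt_map_pt_hook(); it lets a hypervisor backend map pagetable pages
specially (for example read-only), while a native kernel is expected to fall
back to a plain kmap_atomic(). A sketch of that non-paravirt fallback,
roughly as it ends up in highmem.h:

#ifndef CONFIG_PARAVIRT
#define kmap_atomic_pte(page, type)	kmap_atomic(page, type)
#endif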
* tables contain all the necessary information.
*/
#define update_mmu_cache(vma,address,pte) do { } while (0)
+
+void native_pagetable_setup_start(pgd_t *base);
+void native_pagetable_setup_done(pgd_t *base);
+
+#ifndef CONFIG_PARAVIRT
+static inline void paravirt_pagetable_setup_start(pgd_t *base)
+{
+	native_pagetable_setup_start(base);
+}
+
+static inline void paravirt_pagetable_setup_done(pgd_t *base)
+{
+	native_pagetable_setup_done(base);
+}
+#endif /* !CONFIG_PARAVIRT */
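With CONFIG_PARAVIRT enabled, the corresponding wrappers are provided by
paravirt.h and dispatch through the ops table instead, so a hypervisor can
interpose on boot-time pagetable setup; a sketch, assuming the single
paravirt_ops structure of this era:

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
	/* native kernels leave this pointing at native_pagetable_setup_start */
	paravirt_ops.pagetable_setup_start(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
	paravirt_ops.pagetable_setup_done(base);
}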
+
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_FLATMEM