#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define USER_PTRS_PER_PGD	((TASK_SIZE-1)/PGDIR_SIZE+1)
#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0
#define _PAGE_BIT_RW		1
#define _PAGE_BIT_USER		2
#define _PAGE_BIT_PWT		3
#define _PAGE_BIT_PCD		4
#define _PAGE_BIT_ACCESSED	5
#define _PAGE_BIT_DIRTY		6
#define _PAGE_BIT_FILE		6	/* shares bit 6 with _PAGE_BIT_DIRTY; see below */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */
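
/*
 * For reference only (editorial illustration, not extra definitions):
 * expanding the shifts below gives the familiar x86 PTE flag values,
 * e.g.
 *
 *	_PAGE_PRESENT == 0x001, _PAGE_RW     == 0x002, _PAGE_USER == 0x004,
 *	_PAGE_PSE     == 0x080, _PAGE_GLOBAL == 0x100
 */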

#define _PAGE_PRESENT	(_AC(1, L)<<_PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AC(1, L)<<_PAGE_BIT_RW)
#define _PAGE_USER	(_AC(1, L)<<_PAGE_BIT_USER)
#define _PAGE_PWT	(_AC(1, L)<<_PAGE_BIT_PWT)
#define _PAGE_PCD	(_AC(1, L)<<_PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AC(1, L)<<_PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AC(1, L)<<_PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AC(1, L)<<_PAGE_BIT_PSE)	/* 2MB page */
#define _PAGE_GLOBAL	(_AC(1, L)<<_PAGE_BIT_GLOBAL)	/* Global TLB entry */
#define _PAGE_UNUSED1	(_AC(1, L)<<_PAGE_BIT_UNUSED1)
#define _PAGE_UNUSED2	(_AC(1, L)<<_PAGE_BIT_UNUSED2)
#define _PAGE_UNUSED3	(_AC(1, L)<<_PAGE_BIT_UNUSED3)

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AC(1, ULL) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	0
#endif

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with PROT_NONE;
					   pte_present gives true */
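
/*
 * Editorial note: the two aliases above are safe because _PAGE_FILE and
 * _PAGE_PROTNONE are only interpreted while _PAGE_PRESENT is clear, so
 * they can reuse the dirty and PSE bit positions. Keeping _PAGE_PROTNONE
 * set on a PROT_NONE mapping lets the kernel distinguish it from a
 * swapped-out or file-backed non-present PTE.
 */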

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#ifdef CONFIG_X86_32
#define _PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL	(_PAGE_KERNEL_EXEC | _PAGE_NX)

#ifndef __ASSEMBLY__
extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#endif	/* __ASSEMBLY__ */
#else
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL	(__PAGE_KERNEL_EXEC | _PAGE_NX)
#endif

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#ifdef CONFIG_X86_32
# define MAKE_GLOBAL(x)			__pgprot((x))
#else
# define MAKE_GLOBAL(x)			__pgprot((x) | _PAGE_GLOBAL)
#endif
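
/*
 * Editorial note: on 32-bit, whether global pages are usable depends on
 * a runtime PGE check, so _PAGE_GLOBAL is folded into the __PAGE_KERNEL
 * variables during boot rather than added here at compile time.
 */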

#define PAGE_KERNEL			MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			MAKE_GLOBAL(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_NOCACHE		MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE		MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
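
/*
 * The __P/__S tables below populate protection_map[]; the three index
 * bits are xwr (PROT_EXEC, PROT_WRITE, PROT_READ), with __P for private
 * (copy-on-write) and __S for shared mappings. For example (editorial
 * illustration), a private PROT_READ|PROT_EXEC mapping takes __P101 ==
 * PAGE_READONLY_EXEC, while a shared PROT_READ|PROT_WRITE mapping takes
 * __S011 == PAGE_SHARED.
 */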

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
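
/*
 * Editorial sketch: callers that need a permanently zeroed page can map
 * ZERO_PAGE(vaddr) read-only instead of allocating and clearing a fresh
 * page; since the page is shared system-wide, it must never be mapped
 * writable.
 */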

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_PSE; }

static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_DIRTY); }
static inline pte_t pte_mkold(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_ACCESSED); }
static inline pte_t pte_wrprotect(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_RW); }
static inline pte_t pte_mkexec(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_NX); }
static inline pte_t pte_mkdirty(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_DIRTY); }
static inline pte_t pte_mkyoung(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_ACCESSED); }
static inline pte_t pte_mkwrite(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_RW); }
static inline pte_t pte_mkhuge(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_PSE); }
static inline pte_t pte_clrhuge(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_PSE); }
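
/*
 * Editorial usage sketch: these helpers operate on the pte value only
 * and are typically chained before the result is installed with
 * set_pte_at(), e.g. (illustrative):
 *
 *	entry = pte_mkyoung(pte_mkdirty(pte_mkwrite(entry)));
 */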

extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}
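
/*
 * Editorial worked example: pfn_pte(pfn, PAGE_KERNEL) builds
 * (pfn << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL), then masks the result
 * with __supported_pte_mask so that flags the CPU cannot accept
 * (notably _PAGE_NX on hardware without NX support) are dropped.
 */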

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK & ~_PAGE_NX;
	val |= pgprot_val(newprot) & __supported_pte_mask;

	return __pte(val);
}
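
/*
 * Editorial example: pte_modify(pte, PAGE_READONLY) keeps the page
 * frame number and the accessed/dirty state (everything covered by
 * _PAGE_CHG_MASK) while replacing the permission bits, which is what
 * mprotect() needs when changing an existing mapping's protection.
 */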

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif	/* CONFIG_PARAVIRT */
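
/*
 * Editorial note: with CONFIG_PARAVIRT enabled, the operations above are
 * supplied by <asm/paravirt.h> instead, which routes them through the
 * paravirt ops table so a hypervisor can intercept page table updates;
 * the native_* functions remain the default backing implementations.
 */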

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update
 * which has not been done using the set_pte / clear_pte interfaces. It is
 * used by shadow mode hypervisors to resynchronize the shadow page tables.
 * Kernel PTE updates should either be sets, clears, or set_pte_atomic for
 * P->P transitions, which means this hook should only be called for user
 * PTEs. This hook implies a P->P protection or access change has taken
 * place, which requires a subsequent TLB flush. The notification can
 * optionally be delayed until the TLB flush event by using the
 * pte_update_defer form of the interface, but care must be taken to ensure
 * that the flush happens while still holding the same page table lock so
 * that the shadow and primary pages do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif
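
/*
 * Editorial illustration of the rule above: a primitive that flips a PTE
 * bit in place, rather than going through set_pte(), must follow up with
 * the hook, as ptep_test_and_clear_young() and ptep_set_wrprotect() below
 * do. In sketch form:
 *
 *	clear_bit(_PAGE_BIT_ACCESSED, (unsigned long *)&ptep->pte);
 *	pte_update(mm, addr, ptep);
 */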

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
({									\
	int __changed = !pte_same(*(ptep), entry);			\
	if (__changed && dirty) {					\
		*(ptep) = entry;					\
		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
		flush_tlb_page(vma, address);				\
	}								\
	__changed;							\
})

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
	int __ret = 0;							\
	if (pte_young(*(ptep)))						\
		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
					   (unsigned long *)&(ptep)->pte); \
	if (__ret)							\
		pte_update((vma)->vm_mm, addr, ptep);			\
	__ret;								\
})

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep)			\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
	if (__young)							\
		flush_tlb_page(vma, address);				\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}
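
/*
 * Editorial note: ptep_set_wrprotect() is the fork()/copy-on-write
 * write-protect helper. Clearing _PAGE_RW with an atomic clear_bit()
 * avoids losing a concurrent hardware dirty/accessed update, and
 * pte_update() then notifies a shadow-paging hypervisor of the
 * in-place change.
 */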

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_H */