#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H
/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
struct mm_struct;
struct vm_area_struct;
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern unsigned long empty_zero_page[1024];
extern pgd_t swapper_pg_dir[1024];
extern struct kmem_cache *pmd_cache;
extern spinlock_t pgd_lock;
extern struct page *pgd_list;
void check_pgt_cache(void);

void pmd_ctor(struct kmem_cache *, void *);
void pgtable_cache_init(void);
void paging_init(void);
/*
 * The Linux x86 paging architecture is 'compile-time dual-mode': it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE	(1UL << PMD_SHIFT)
# define PMD_MASK	(~(PMD_SIZE-1))
#else
# include <asm/pgtable-2level-defs.h>
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT	22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
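/*
 * Worked example (illustrative, assuming the default 3G/1G split where
 * __PAGE_OFFSET is 0xC0000000): each boot-time pgd entry covers 4MB
 * (1 << TWOLEVEL_PGDIR_SHIFT), so
 *
 *	BOOT_USER_PGD_PTRS   = 0xC0000000 >> 22 = 768
 *	BOOT_KERNEL_PGD_PTRS = 1024 - 768       = 256
 *
 * i.e. the low 768 entries map user space and the top 256 map the kernel.
 */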
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + \
			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
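/*
 * Worked example (illustrative): the expression above skips at least
 * VMALLOC_OFFSET bytes past high_memory and then aligns down to a
 * VMALLOC_OFFSET boundary, so the hole is between 8MB and 16MB wide.
 * With high_memory at 0xC8000000 (128MB of lowmem under the default
 * PAGE_OFFSET):
 *
 *	(0xC8000000 + 0x00FFFFFF) & ~0x007FFFFF = 0xC8800000
 *
 * leaving exactly an 8MB guard hole before the vmalloc area.
 */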
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK
/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];
#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))

/* To avoid harmful races, pmd_none(x) should check only the lower word when PAE */
#define pmd_none(x)	(!(unsigned long)pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_huge(pte_t pte)		{ return pte_val(pte) & _PAGE_PSE; }
/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline pte_t pte_mkclean(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_DIRTY); }
static inline pte_t pte_mkold(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_ACCESSED); }
static inline pte_t pte_wrprotect(pte_t pte)	{ return __pte(pte_val(pte) & ~_PAGE_RW); }
static inline pte_t pte_mkdirty(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_DIRTY); }
static inline pte_t pte_mkyoung(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_ACCESSED); }
static inline pte_t pte_mkwrite(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_RW); }
static inline pte_t pte_mkhuge(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_PSE); }
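/*
 * Example (illustrative sketch, not part of this header): the accessors
 * above are pure value transformations and compose freely; the result
 * still has to be written back with set_pte_at() to take effect, e.g.
 *
 *	pte_t pte = *ptep;
 *	pte = pte_wrprotect(pte_mkclean(pte));
 *	set_pte_at(mm, addr, ptep, pte);
 */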
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif
#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to ensure that the flush happens while
 * still holding the same page table lock, so that the shadow and primary pages
 * do not get out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif
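/*
 * Usage sketch (illustrative, with a hypothetical raw bit update): code
 * that pokes a user PTE without going through set_pte()/pte_clear() must
 * notify the hypervisor hook and flush under the same page table lock:
 *
 *	spin_lock(ptl);
 *	ptep->pte_low |= _PAGE_DIRTY;		// hypothetical raw update
 *	pte_update_defer(vma->vm_mm, addr, ptep);
 *	flush_tlb_page(vma, addr);		// flush before unlocking
 *	spin_unlock(ptl);
 */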
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
({									\
	int __changed = !pte_same(*(ptep), entry);			\
	if (__changed && dirty) {					\
		(ptep)->pte_low = (entry).pte_low;			\
		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
		flush_tlb_page(vma, address);				\
	}								\
	__changed;							\
})
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
	int __ret = 0;							\
	if (pte_young(*(ptep)))						\
		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
						&(ptep)->pte_low);	\
	if (__ret)							\
		pte_update((vma)->vm_mm, addr, ptep);			\
	__ret;								\
})
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep)			\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
	if (__young)							\
		flush_tlb_page(vma, address);				\
	__young;							\
})
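/*
 * Example (illustrative sketch): page aging in the reclaim path uses the
 * helper above to sample and reset the hardware accessed bit, e.g.
 *
 *	if (ptep_clear_flush_young(vma, address, pte))
 *		referenced++;	// page was touched since the last scan
 */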
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address space destruction is in progress; paravirt
		 * does not care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
	pte_update(mm, addr, ptep);
}
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
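/*
 * Usage sketch (illustrative): when a new pgd is set up, the kernel
 * portion of the reference page table can be copied into it like this
 * (using the USER_PGD_PTRS/KERNEL_PGD_PTRS split defined above):
 *
 *	clone_pgd_range(pgd + USER_PGD_PTRS,
 *			swapper_pg_dir + USER_PGD_PTRS,
 *			KERNEL_PGD_PTRS);
 */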
/*
 * Macro to mark a page protection value as "uncacheable".  On processors
 * which do not support it, this is a no-op.
 */
#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)					  \
				 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
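/*
 * Usage sketch (illustrative): a driver mapping device memory into user
 * space typically combines this with io_remap_pfn_range() (defined at
 * the bottom of this file):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn,
 *			   vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */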
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_low |= pgprot_val(newprot);
#ifdef CONFIG_X86_PAE
	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	pte.pte_high |= (pgprot_val(newprot) >> 32) & \
					(__supported_pte_mask >> 32);
#endif
	return pte;
}
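/*
 * Example (illustrative sketch): mprotect-style code changes the
 * protection of an existing mapping while _PAGE_CHG_MASK preserves the
 * pfn and the dirty/accessed state:
 *
 *	pte_t old = *ptep;
 *	pte_t new = pte_modify(old, vma->vm_page_prot);
 *	set_pte_at(vma->vm_mm, addr, ptep, new);
 */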
#define pmd_large(pmd) \
((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_index_k(addr) pgd_index(addr)
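/*
 * Worked example (illustrative, non-PAE, where PGDIR_SHIFT == 22 and
 * PTRS_PER_PGD == 1024): for the address 0xC0100000,
 *
 *	pgd_index(0xC0100000) = (0xC0100000 >> 22) & 1023 = 768
 *
 * i.e. the first pgd slot of the kernel mapping under the default
 * 3G/1G split.
 */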
/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
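/*
 * Example (illustrative sketch): a full top-down walk to the pte mapping
 * a kernel virtual address, checking each level on the way down (this is
 * essentially what lookup_address() below does):
 *
 *	pgd_t *pgd = pgd_offset_k(address);
 *	pud_t *pud;
 *	pmd_t *pmd;
 *
 *	if (pgd_none(*pgd))
 *		return NULL;
 *	pud = pud_offset(pgd, address);
 *	pmd = pmd_offset(pud, address);
 *	if (pmd_none(*pmd))
 *		return NULL;
 *	return pte_offset_kernel(pmd, address);
 */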
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_page_vaddr(pmd) \
		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry is present.
 * NOTE: the return type is pte_t, but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address);
/*
 * Make a given kernel text page executable/non-executable.
 * Returns the previous executability setting of that page (which
 * is used to restore the previous state). Used by the SMP bootup code.
 * NOTE: this is an __init function for security reasons.
 */
#ifdef CONFIG_X86_PAE
 extern int set_kernel_exec(unsigned long vaddr, int enable);
#else
 static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0; }
#endif
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
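/*
 * Usage sketch (illustrative): with CONFIG_HIGHPTE the pte page may live
 * in highmem, so every pte_offset_map() must be paired with pte_unmap()
 * on the same kmap slot, and the pointer must not be used afterwards:
 *
 *	pte_t *pte = pte_offset_map(pmd, address);
 *	pte_t entry = *pte;
 *	pte_unmap(pte);
 *	// operate on 'entry', not on 'pte', from here on
 */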
/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)					\
do {									\
	pte_clear(&init_mm, vaddr, ptep);				\
	__flush_tlb_one(vaddr);						\
} while (0)
/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)
void native_pagetable_setup_start(pgd_t *base);
void native_pagetable_setup_done(pgd_t *base);
#ifndef CONFIG_PARAVIRT
static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* !CONFIG_PARAVIRT */

#endif /* !__ASSEMBLY__ */
/*
 * kern_addr_valid() is (1) for FLATMEM and (0) for
 * SPARSEMEM and DISCONTIGMEM
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#else
#define kern_addr_valid(kaddr)	(0)
#endif
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#include <asm-generic/pgtable.h>
#endif /* _I386_PGTABLE_H */