/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
/* Should move most of this stuff into the appropriate includes */
#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
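
/*
 * Note: PMD_SHIFT is 22 without PAE (4MB large pages) and 21 with PAE
 * (2MB), hence the "2/4MB" wording in the comments below.
 */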
static inline pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset(&init_mm, address);
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	if (pgd_val(*pgd) & _PAGE_PSE)
		return (pte_t *)pgd;
	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd))
		return NULL;
	/* A large page has no pte level; return the pmd entry itself. */
	if (pmd_val(*pmd) & _PAGE_PSE)
		return (pte_t *)pmd;
	return pte_offset(pmd, address);
}
/*
 * Replace one large page mapping with PTRS_PER_PTE small page mappings,
 * giving only the page at 'address' the new protection.
 */
static struct page *split_large_page(unsigned long address, pgprot_t prot)
{
	int i;
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;

	if (!base)
		return NULL;
	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = mk_pte_phys(addr,
				       addr == address ? prot : PAGE_KERNEL);
	}
	return base;
}
static void flush_kernel_map(void *address)
{
	/* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
	if (boot_cpu_data.x86 >= 4)	/* WBINVD needs a 486 or later:
					   check the family, not the model */
		asm volatile("wbinvd":::"memory");
	/*
	 * Do a global flush here to work around large page flushing
	 * errata in some early Athlons.
	 */
	__flush_tlb_all();
}
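
/*
 * Sketch only, not compiled in: roughly what the CLFLUSH variant hinted
 * at above might look like, flushing just the lines covering the page
 * instead of the whole cache. The x86_clflush_size field is an
 * assumption borrowed from later kernels, not this one.
 */
#if 0
static void flush_kernel_map_clflush(void *address)
{
	int i;

	/* Flush each cache line covering the page, then all TLBs. */
	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		asm volatile("clflush %0" : "+m" (((char *)address)[i]));
	__flush_tlb_all();
}
#endif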
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	set_pte_atomic(kpte, pte);	/* change init_mm */
#ifndef CONFIG_X86_PAE
	{
		struct list_head *l;

		/*
		 * Without PAE the pmd is folded into each process's page
		 * directory, so the change must be propagated to every mm.
		 * With PAE the kernel pmds are shared and updating init_mm
		 * above is enough.
		 */
		spin_lock(&mmlist_lock);
		list_for_each(l, &init_mm.mmlist) {
			struct mm_struct *mm = list_entry(l, struct mm_struct, mmlist);
			pmd_t *pmd = pmd_offset(pgd_offset(mm, address), address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
		spin_unlock(&mmlist_lock);
	}
#endif
}
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
	pte_t *linear = (pte_t *)
		pmd_offset(pgd_offset(&init_mm, address), address);
	set_pmd_pte(linear, address,
		    mk_pte_phys(__pa(address & LARGE_PAGE_MASK),
				MAKE_GLOBAL(_KERNPG_TABLE|_PAGE_PSE)));
}
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 * When the page is in highmem it must never be kmap'ed.
 */
static int
__change_page_attr(struct page *page, pgprot_t prot, struct page **oldpage)
{
	pte_t *kpte;
	unsigned long address;
	struct page *kpte_page;

#ifdef CONFIG_HIGHMEM
	if (page >= highmem_start_page)
		BUG();
#endif
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address);
	if (!kpte)
		return -EINVAL;
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
			set_pte_atomic(kpte, mk_pte(page, prot));
		} else {
			struct page *split = split_large_page(address, prot);
			if (!split)
				return -ENOMEM;
			set_pmd_pte(kpte, address, mk_pte(split, PAGE_KERNEL));
		}
		/* The page count doubles as a count of non-standard
		   mappings in this pte page. */
		atomic_inc(&kpte_page->count);
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		atomic_dec(&kpte_page->count);
	}

	/* Only the initial reference is left: no non-standard mappings
	   remain, so fold the area back into one large page. */
	if (cpu_has_pse && (atomic_read(&kpte_page->count) == 1)) {
		*oldpage = kpte_page;
		revert_page(kpte_page, address);
	}
	return 0;
}
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0;
	struct page *fpage;
	int i;

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, page++) {
		fpage = NULL;
		err = __change_page_attr(page, prot, &fpage);
		if (err)
			break;
		/* Flush when a pte page was freed or on the last page. */
		if (fpage || i == numpages-1) {
			void *address = page_address(page);
#ifdef CONFIG_SMP
			smp_call_function(flush_kernel_map, address, 1, 1);
#endif
			flush_kernel_map(address);
			if (fpage)
				__free_page(fpage);
		}
	}
	up_write(&init_mm.mmap_sem);
	return err;
}

EXPORT_SYMBOL(change_page_attr);
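
/*
 * Illustrative usage sketch, not part of the original file: a driver
 * that needs a buffer uncached in the kernel linear map could switch
 * the attributes and restore them later. The function name here is
 * hypothetical; PAGE_KERNEL_NOCACHE is assumed to be available.
 */
#if 0
static int example_make_uncached(struct page *page, int n)
{
	int err;

	/* Switch the linear mapping of the pages to uncached. */
	err = change_page_attr(page, n, PAGE_KERNEL_NOCACHE);
	if (err)
		return err;

	/* ... use the uncached mapping ... */

	/* Restore the default write-back attributes. */
	return change_page_attr(page, n, PAGE_KERNEL);
}
#endif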