/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/processor.h>

/* Should move most of this stuff into the appropriate includes */
#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)

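/*
 * Walk init_mm's page tables and return the pte mapping 'address'.
 * If the address is covered by a large (2/4MB) page, the pgd/pmd entry
 * itself is returned cast to a pte pointer; NULL means not mapped.
 */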
static inline pte_t *lookup_address(unsigned long address)
{
        pmd_t *pmd;
        pgd_t *pgd = pgd_offset(&init_mm, address);

        if (pgd_none(*pgd))
                return NULL;
        if (pgd_val(*pgd) & _PAGE_PSE)
                return (pte_t *)pgd;
        pmd = pmd_offset(pgd, address);
        if (pmd_none(*pmd))
                return NULL;
        if (pmd_val(*pmd) & _PAGE_PSE)
                return (pte_t *)pmd;
        return pte_offset(pmd, address);
}

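/*
 * Allocate a page-table page and fill it with 4K ptes covering the same
 * 2/4MB region as the large page containing 'address'. The pte for
 * 'address' itself gets 'prot'; all the others get PAGE_KERNEL.
 */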
static struct page *split_large_page(unsigned long address, pgprot_t prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;
        if (!base)
                return NULL;
        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = mk_pte_phys(addr,
                                       addr == address ? prot : PAGE_KERNEL);
        }
        return base;
}

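/*
 * Flush caches and TLBs after an attribute change; change_page_attr()
 * below runs this on every CPU via smp_call_function().
 */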
static void flush_kernel_map(void *address)
{
        /* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
        if (boot_cpu_data.x86_model >= 4)
                asm volatile("wbinvd":::"memory");

        /* Do global flush here to work around large page flushing errata
           in some early Athlons */
        __flush_tlb_all();
}

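/*
 * Install a pmd-level entry. With PAE the kernel pmd page is shared by
 * all processes, so updating init_mm is enough; without PAE the pmd is
 * folded into each process's private pgd, so the entry also has to be
 * copied into every mm on init_mm.mmlist.
 */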
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        set_pte_atomic(kpte, pte);      /* change init_mm */
#ifndef CONFIG_X86_PAE
        {
                struct list_head *l;
                spin_lock(&mmlist_lock);
                list_for_each(l, &init_mm.mmlist) {
                        struct mm_struct *mm = list_entry(l, struct mm_struct, mmlist);
                        pmd_t *pmd = pmd_offset(pgd_offset(mm, address), address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
                spin_unlock(&mmlist_lock);
        }
#endif
}

/* no more special protections in this 2/4MB area - revert to a
   large page again. */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
        pte_t *linear = (pte_t *)
                pmd_offset(pgd_offset(&init_mm, address), address);
        set_pmd_pte(linear, address,
                mk_pte_phys(__pa(address & LARGE_PAGE_MASK),
                            MAKE_GLOBAL(_KERNPG_TABLE|_PAGE_PSE)));
}

/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 * When the page is in highmem it must never be kmap'ed.
 *
 * When the change lets the surrounding 2/4MB region revert to a single
 * large page, the now-unused pte page is handed back through *oldpage;
 * the caller must flush the TLBs before freeing it.
 */
static int
__change_page_attr(struct page *page, pgprot_t prot, struct page **oldpage)
{
        pte_t *kpte;
        unsigned long address;
        struct page *kpte_page;

#ifdef CONFIG_HIGHMEM
        if (page >= highmem_start_page)
                BUG();
#endif
        address = (unsigned long)page_address(page);
        kpte = lookup_address(address);
        if (!kpte)
                return -EINVAL;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
                if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                        set_pte_atomic(kpte, mk_pte(page, prot));
                } else {
                        struct page *split = split_large_page(address, prot);
                        if (!split)
                                return -ENOMEM;
                        set_pmd_pte(kpte, address, mk_pte(split, PAGE_KERNEL));
                        kpte_page = split;
                }
                atomic_inc(&kpte_page->count);
        } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                atomic_dec(&kpte_page->count);
        }

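        /*
         * A pte page from split_large_page() starts with count 1 from
         * alloc_pages(); the count is raised for each pte given a
         * non-PAGE_KERNEL protection and dropped on each revert. Back
         * at 1, no special protections remain and the 2/4MB region can
         * be collapsed into a single large page again.
         */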
        if (cpu_has_pse && (atomic_read(&kpte_page->count) == 1)) {
                *oldpage = kpte_page;
                revert_page(kpte_page, address);
        }
        return 0;
}

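/*
 * Exported interface: apply 'prot' to 'numpages' pages starting at
 * 'page'. As a sketch (hypothetical caller, not from this file), a
 * driver needing one page mapped uncached might call
 *
 *      change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *
 * and revert with change_page_attr(page, 1, PAGE_KERNEL) when done.
 */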
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        int err = 0;
        struct page *fpage;
        int i;

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, page++) {
                fpage = NULL;
                err = __change_page_attr(page, prot, &fpage);
                if (err)
                        break;
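                /*
                 * Flush after the last page and whenever a pte page was
                 * released: on all other CPUs first, then locally,
                 * before the freed page is returned.
                 */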
                if (fpage || i == numpages-1) {
                        void *address = page_address(page);
#ifdef CONFIG_SMP
                        smp_call_function(flush_kernel_map, address, 1, 1);
#endif
                        flush_kernel_map(address);
                        if (fpage)
                                __free_page(fpage);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}

EXPORT_SYMBOL(change_page_attr);