/*
 *	mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
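
/*
 * Walk the ptes covered by one pmd entry and rewrite every present
 * pte with the new protection.  pte_modify() keeps the page frame
 * number and replaces only the protection bits.
 */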
static inline void change_pte_range(pmd_t * pmd, unsigned long address,
	unsigned long size, pgprot_t newprot)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (pte_present(*pte)) {
			pte_t entry;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place..
			 */
			entry = ptep_get_and_clear(pte);
			set_pte(pte, pte_modify(entry, newprot));
		}
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
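
/*
 * Walk the pmd entries of one page directory entry, clamping the
 * range to PGDIR_SIZE and handing each pmd to change_pte_range().
 */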
static inline void change_pmd_range(pgd_t * pgd, unsigned long address,
	unsigned long size, pgprot_t newprot)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*pgd))
		return;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		return;
	}
	pmd = pmd_offset(pgd, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		change_pte_range(pmd, address, end - address, newprot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
}
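
/*
 * Top level of the page table walk: flush the caches, rewrite the
 * ptes under mm->page_table_lock, then flush the TLB for the range.
 */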
static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
{
	pgd_t *dir;
	unsigned long beg = start;

	dir = pgd_offset(current->mm, start);
	flush_cache_range(current->mm, beg, end);
	if (start >= end)
		BUG();
	spin_lock(&current->mm->page_table_lock);
	do {
		change_pmd_range(dir, start, end - start, newprot);
		start = (start + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (start && (start < end));
	spin_unlock(&current->mm->page_table_lock);
	flush_tlb_range(current->mm, beg, end);
	return;
}
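
/*
 * The mprotect_fixup_*() helpers adjust the vma list for the four
 * possible cases: the whole vma changes, only its start, only its
 * end, or a range in the middle.  An anonymous, non-shared vma is
 * merged into a suitable predecessor instead of being kept separate.
 */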
static inline int mprotect_fixup_all(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * prev = *pprev;
	struct mm_struct * mm = vma->vm_mm;

	if (prev && prev->vm_end == vma->vm_start && can_vma_merge(prev, newflags) &&
	    !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
		spin_lock(&mm->page_table_lock);
		prev->vm_end = vma->vm_end;
		__vma_unlink(mm, vma, prev);
		spin_unlock(&mm->page_table_lock);

		kmem_cache_free(vm_area_cachep, vma);
		mm->map_count--;

		return 0;
	}

	spin_lock(&mm->page_table_lock);
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	spin_unlock(&mm->page_table_lock);

	*pprev = vma;

	return 0;
}
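
/*
 * Change the front part of a vma: either extend a mergeable
 * predecessor over [vma->vm_start, end) or split off a new vma for
 * that range, shifting the original's start (and file offset) up
 * to end.
 */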
static inline int mprotect_fixup_start(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
	unsigned long end,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * n, * prev = *pprev;

	*pprev = vma;

	if (prev && prev->vm_end == vma->vm_start && can_vma_merge(prev, newflags) &&
	    !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
		spin_lock(&vma->vm_mm->page_table_lock);
		prev->vm_end = end;
		vma->vm_start = end;
		spin_unlock(&vma->vm_mm->page_table_lock);

		return 0;
	}
	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	n->vm_end = end;
	n->vm_flags = newflags;
	n->vm_raend = 0;
	n->vm_page_prot = prot;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_start = end;
	__insert_vm_struct(current->mm, n);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);

	return 0;
}
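
/*
 * Change the tail of a vma: split off a new vma for [start,
 * vma->vm_end) and shrink the original so it ends at start.
 */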
static inline int mprotect_fixup_end(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
	unsigned long start,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	n->vm_start = start;
	n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
	n->vm_flags = newflags;
	n->vm_raend = 0;
	n->vm_page_prot = prot;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	vma->vm_end = start;
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	__insert_vm_struct(current->mm, n);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);

	*pprev = n;

	return 0;
}
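
/*
 * Change a range in the middle of a vma: split it into three pieces,
 * reusing the original vma for the middle one so that [start, end)
 * carries the new flags and protection.
 */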
static inline int mprotect_fixup_middle(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
	unsigned long start, unsigned long end,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * left, * right;

	left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!left)
		return -ENOMEM;
	right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!right) {
		kmem_cache_free(vm_area_cachep, left);
		return -ENOMEM;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	right->vm_start = end;
	right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
	left->vm_raend = 0;
	right->vm_raend = 0;
	if (vma->vm_file)
		atomic_add(2, &vma->vm_file->f_count);
	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
	vma->vm_raend = 0;
	vma->vm_page_prot = prot;
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_flags = newflags;
	__insert_vm_struct(current->mm, left);
	__insert_vm_struct(current->mm, right);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);

	*pprev = right;

	return 0;
}
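
/*
 * Pick the right fixup helper for the (start, end) range, then
 * rewrite the hardware page tables.  A no-op flag change returns
 * without touching anything.
 */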
static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	pgprot_t newprot;
	int error;

	if (newflags == vma->vm_flags) {
		*pprev = vma;
		return 0;
	}
	newprot = protection_map[newflags & 0xf];
	if (start == vma->vm_start) {
		if (end == vma->vm_end)
			error = mprotect_fixup_all(vma, pprev, newflags, newprot);
		else
			error = mprotect_fixup_start(vma, pprev, end, newflags, newprot);
	} else if (end == vma->vm_end)
		error = mprotect_fixup_end(vma, pprev, start, newflags, newprot);
	else
		error = mprotect_fixup_middle(vma, pprev, start, end, newflags, newprot);

	if (error)
		return error;

	change_protection(start, end, newprot);
	return 0;
}
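
/*
 * sys_mprotect(): validate the arguments, then walk every vma
 * covering [start, start + len) and apply mprotect_fixup() to it.
 * The range must be fully mapped, otherwise -EFAULT is returned.
 * On the way out, try to merge the last fixed-up vma with its
 * successor.
 */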
asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next, * prev;
	int error = -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
		return -EINVAL;
	if (end == start)
		return 0;

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -EFAULT;
	if (!vma || vma->vm_start > start)
		goto out;

	for (nstart = start ; ; ) {
		unsigned int newflags;
		int last = 0;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
		/* The VM_MAY* bits sit four bits above the VM_* bits:
		 * refuse anything the mapping does not allow. */
		if ((newflags & ~(newflags >> 4)) & 0xf) {
			error = -EACCES;
			goto out;
		}

		if (vma->vm_end > end) {
			error = mprotect_fixup(vma, &prev, nstart, end, newflags);
			goto out;
		}
		if (vma->vm_end == end)
			last = 1;

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		if (last)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -EFAULT;
			goto out;
		}
	}
	if (next && prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags) &&
	    !prev->vm_file && !(prev->vm_flags & VM_SHARED)) {
		spin_lock(&prev->vm_mm->page_table_lock);
		prev->vm_end = next->vm_end;
		__vma_unlink(prev->vm_mm, next, prev);
		spin_unlock(&prev->vm_mm->page_table_lock);

		kmem_cache_free(vm_area_cachep, next);
		prev->vm_mm->map_count--;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}