/*
 *      linux/mm/mremap.c
 *
 *      (C) Copyright 1996 Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

extern int vm_enough_memory(long pages);

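/*
 * Look up the pte that maps 'addr', or return NULL if no pte is
 * present.  Bad pgd/pmd entries are reported and cleared on the way.
 */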
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t * pgd;
        pmd_t * pmd;
        pte_t * pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto end;
        if (pgd_bad(*pgd)) {
                pgd_ERROR(*pgd);
                pgd_clear(pgd);
                goto end;
        }

        pmd = pmd_offset(pgd, addr);
        if (pmd_none(*pmd))
                goto end;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                goto end;
        }

        pte = pte_offset(pmd, addr);
        if (pte_none(*pte))
                pte = NULL;
end:
        return pte;
}

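/*
 * Make sure a pte exists for 'addr', allocating the pmd/pte levels as
 * needed.  Returns the pte, or NULL on allocation failure.  Note that
 * pte_alloc() may need to drop mm->page_table_lock in order to sleep.
 */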
static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
{
        pmd_t * pmd;
        pte_t * pte = NULL;

        pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
        if (pmd)
                pte = pte_alloc(mm, pmd, addr);
        return pte;
}

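/*
 * Move a single pte from 'src' to 'dst'.  If there is no destination
 * pte, the entry is put back where it came from and 1 is returned so
 * the caller can unwind.
 */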
static inline int copy_one_pte(struct mm_struct *mm, pte_t * src, pte_t * dst)
{
        int error = 0;
        pte_t pte;

        if (!pte_none(*src)) {
                pte = ptep_get_and_clear(src);
                if (!dst) {
                        /* No dest?  We must put it back. */
                        dst = src;
                        error++;
                }
                set_pte(dst, pte);
        }
        return error;
}

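/*
 * Move the pte mapping 'old_addr' over to 'new_addr'.  The source pte
 * is looked up a second time after alloc_one_pte() because pte_alloc()
 * may have dropped mm->page_table_lock while allocating.
 */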
static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
        int error = 0;
        pte_t * src, * dst;

        spin_lock(&mm->page_table_lock);
        src = get_one_pte(mm, old_addr);
        if (src) {
                dst = alloc_one_pte(mm, new_addr);
                src = get_one_pte(mm, old_addr);
                if (src)
                        error = copy_one_pte(mm, src, dst);
        }
        spin_unlock(&mm->page_table_lock);
        return error;
}

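/*
 * Move the page table entries for [old_addr, old_addr+len) over to
 * new_addr, one page at a time, undoing the move if it fails halfway.
 */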
static int move_page_tables(struct mm_struct * mm,
        unsigned long new_addr, unsigned long old_addr, unsigned long len)
{
        unsigned long offset = len;

        flush_cache_range(mm, old_addr, old_addr + len);

        /*
         * This is not the clever way to do this, but we're taking the
         * easy way out on the assumption that most remappings will be
         * only a few pages.. This also makes error recovery easier.
         */
        while (offset) {
                offset -= PAGE_SIZE;
                if (move_one_page(mm, old_addr + offset, new_addr + offset))
                        goto oops_we_failed;
        }
        flush_tlb_range(mm, old_addr, old_addr + len);
        return 0;

        /*
         * Ok, the move failed because we didn't have enough pages for
         * the new page table tree. This is unlikely, but we have to
         * take the possibility into account. In that case we just move
         * all the pages back (this will work, because we still have
         * the old page tables)
         */
oops_we_failed:
        flush_cache_range(mm, new_addr, new_addr + len);
        while ((offset += PAGE_SIZE) < len)
                move_one_page(mm, new_addr + offset, old_addr + offset);
        zap_page_range(mm, new_addr, len);
        return -1;
}

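/*
 * Set up a vma for the moved range.  First try to extend or reuse a
 * neighbouring private anonymous vma at new_addr; only allocate a
 * fresh vm_area_struct if no merge is possible.
 */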
static inline unsigned long move_vma(struct vm_area_struct * vma,
        unsigned long addr, unsigned long old_len, unsigned long new_len,
        unsigned long new_addr)
{
        struct mm_struct * mm = vma->vm_mm;
        struct vm_area_struct * new_vma, * next, * prev;
        int allocated_vma;

        new_vma = NULL;
        next = find_vma_prev(mm, new_addr, &prev);
        if (next) {
                if (prev && prev->vm_end == new_addr &&
                    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
                        spin_lock(&mm->page_table_lock);
                        prev->vm_end = new_addr + new_len;
                        spin_unlock(&mm->page_table_lock);
                        new_vma = prev;
                        if (next != prev->vm_next)
                                BUG();
                        if (prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags)) {
                                spin_lock(&mm->page_table_lock);
                                prev->vm_end = next->vm_end;
                                __vma_unlink(mm, next, prev);
                                spin_unlock(&mm->page_table_lock);

                                mm->map_count--;
                                kmem_cache_free(vm_area_cachep, next);
                        }
                } else if (next->vm_start == new_addr + new_len &&
                           can_vma_merge(next, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
                        spin_lock(&mm->page_table_lock);
                        next->vm_start = new_addr;
                        spin_unlock(&mm->page_table_lock);
                        new_vma = next;
                }
        } else {
                prev = find_vma(mm, new_addr-1);
                if (prev && prev->vm_end == new_addr &&
                    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
                        spin_lock(&mm->page_table_lock);
                        prev->vm_end = new_addr + new_len;
                        spin_unlock(&mm->page_table_lock);
                        new_vma = prev;
                }
        }

        allocated_vma = 0;
        if (!new_vma) {
                new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
                if (!new_vma)
                        goto out;
                allocated_vma = 1;
        }

        if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
                unsigned long vm_locked = vma->vm_flags & VM_LOCKED;

                if (allocated_vma) {
                        *new_vma = *vma;
                        new_vma->vm_start = new_addr;
                        new_vma->vm_end = new_addr+new_len;
                        new_vma->vm_pgoff += (addr-vma->vm_start) >> PAGE_SHIFT;
                        new_vma->vm_raend = 0;
                        if (new_vma->vm_file)
                                get_file(new_vma->vm_file);
                        if (new_vma->vm_ops && new_vma->vm_ops->open)
                                new_vma->vm_ops->open(new_vma);
                        insert_vm_struct(current->mm, new_vma);
                }

                /* XXX: possible errors masked, mapping might remain */
                do_munmap(current->mm, addr, old_len);

                current->mm->total_vm += new_len >> PAGE_SHIFT;
                if (vm_locked) {
                        current->mm->locked_vm += new_len >> PAGE_SHIFT;
                        if (new_len > old_len)
                                make_pages_present(new_addr + old_len,
                                                   new_addr + new_len);
                }
                return new_addr;
        }
        if (allocated_vma)
                kmem_cache_free(vm_area_cachep, new_vma);
 out:
        return -ENOMEM;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space).
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;

        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                goto out;

        if (addr & ~PAGE_MASK)
                goto out;

        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);

        if (old_len > TASK_SIZE || addr > TASK_SIZE - old_len)
                goto out;

        if (addr >= TASK_SIZE)
                goto out;

        /* new_addr is only valid if MREMAP_FIXED is specified */
        if (flags & MREMAP_FIXED) {
                if (new_addr & ~PAGE_MASK)
                        goto out;
                if (!(flags & MREMAP_MAYMOVE))
                        goto out;

                if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
                        goto out;

                if (new_addr >= TASK_SIZE)
                        goto out;

                /*
                 * Allow new_len == 0 only if new_addr == addr,
                 * to preserve in-place truncation (which has always
                 * worked safely, and some apps may depend on it).
                 */
                if (unlikely(!new_len && new_addr != addr))
                        goto out;

                /* Check if the location we're moving into overlaps the
                 * old location at all, and fail if it does.
                 */
                if ((new_addr <= addr) && (new_addr+new_len) > addr)
                        goto out;

                if ((addr <= new_addr) && (addr+old_len) > new_addr)
                        goto out;

                ret = do_munmap(current->mm, new_addr, new_len);
                if (ret && new_len)
                        goto out;
        }

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         */
        if (old_len >= new_len) {
                ret = do_munmap(current->mm, addr+new_len, old_len - new_len);
                if (ret && old_len != new_len)
                        goto out;
                ret = addr;
                if (!(flags & MREMAP_FIXED) || (new_addr == addr))
                        goto out;
        }

        /*
         * Ok, we need to grow..  or relocate.
         */
        ret = -EFAULT;
        vma = find_vma(current->mm, addr);
        if (!vma || vma->vm_start > addr)
                goto out;
        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                goto out;
        if (vma->vm_flags & VM_DONTEXPAND) {
                if (new_len > old_len)
                        goto out;
        }
        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
                locked += new_len - old_len;
                ret = -EAGAIN;
                if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        goto out;
        }
        ret = -ENOMEM;
        if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
            > current->rlim[RLIMIT_AS].rlim_cur)
                goto out;
        /* Private writable mapping? Check memory availability.. */
        if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
            !(flags & MAP_NORESERVE)                             &&
            !vm_enough_memory((new_len - old_len) >> PAGE_SHIFT))
                goto out;

        /* old_len extends exactly to the end of the area,
         * and we're not relocating the area.
         */
        if (old_len == vma->vm_end - addr &&
            !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
            (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
                unsigned long max_addr = TASK_SIZE;
                if (vma->vm_next)
                        max_addr = vma->vm_next->vm_start;
                /* can we just expand the current mapping? */
                if (max_addr - addr >= new_len) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;
                        spin_lock(&vma->vm_mm->page_table_lock);
                        vma->vm_end = addr + new_len;
                        spin_unlock(&vma->vm_mm->page_table_lock);
                        current->mm->total_vm += pages;
                        if (vma->vm_flags & VM_LOCKED) {
                                current->mm->locked_vm += pages;
                                make_pages_present(addr + old_len,
                                                   addr + new_len);
                        }
                        ret = addr;
                        goto out;
                }
        }

        /*
         * We weren't able to just expand or shrink the area,
         * so we need to create a new one and move it..
         */
        ret = -ENOMEM;
        if (flags & MREMAP_MAYMOVE) {
                if (!(flags & MREMAP_FIXED)) {
                        unsigned long map_flags = 0;
                        if (vma->vm_flags & VM_SHARED)
                                map_flags |= MAP_SHARED;

                        new_addr = get_unmapped_area(vma->vm_file, 0, new_len, vma->vm_pgoff, map_flags);
                        ret = new_addr;
                        if (new_addr & ~PAGE_MASK)
                                goto out;
                }
                ret = move_vma(vma, addr, old_len, new_len, new_addr);
        }
out:
        return ret;
}

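/*
 * mremap(2) system call entry point: serialize with other address
 * space changes by taking mmap_sem for writing, then let do_mremap()
 * do the real work.
 */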
asmlinkage unsigned long sys_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        unsigned long ret;

        down_write(&current->mm->mmap_sem);
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
        up_write(&current->mm->mmap_sem);
        return ret;
}
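
/*
 * Userspace usage sketch (illustrative only, not part of this file):
 * grow an anonymous mapping by calling mremap(2), letting the kernel
 * relocate it if the range cannot be extended in place.
 *
 *      #include <sys/mman.h>
 *
 *      void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *      if (q == MAP_FAILED)
 *              perror("mremap");
 */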