/*
 *	linux/mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

extern int vm_enough_memory(long pages);
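
/*
 * Walk the page tables of 'mm' and return a pointer to the pte that
 * maps 'addr', or NULL if nothing is mapped there.  Bad pgd/pmd
 * entries are reported and cleared on the way down.
 */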
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto end;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		goto end;
	}

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd))
		goto end;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto end;
	}

	pte = pte_offset(pmd, addr);
	if (pte_none(*pte))
		pte = NULL;
end:
	return pte;
}

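/*
 * Allocate (if necessary) the pmd and pte needed to map 'addr',
 * returning the pte or NULL if the allocation failed.
 */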
static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pmd_t * pmd;
	pte_t * pte = NULL;

	pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
	if (pmd)
		pte = pte_alloc(mm, pmd, addr);
	return pte;
}

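/*
 * Move one pte entry from 'src' to 'dst'.  Returns non-zero if there
 * was no destination pte and the entry had to be put back in place.
 */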
static inline int copy_one_pte(struct mm_struct *mm, pte_t * src, pte_t * dst)
{
	int error = 0;
	pte_t pte;

	if (!pte_none(*src)) {
		pte = ptep_get_and_clear(src);
		if (!dst) {
			/* No dest?  We must put it back. */
			dst = src;
			error++;
		}
		set_pte(dst, pte);
	}
	return error;
}

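/*
 * Remap the pte for 'old_addr' so that the page is mapped at
 * 'new_addr' instead, holding the page_table_lock across the
 * lookup, allocation and copy.
 */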
static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
	int error = 0;
	pte_t * src;

	spin_lock(&mm->page_table_lock);
	src = get_one_pte(mm, old_addr);
	if (src)
		error = copy_one_pte(mm, src, alloc_one_pte(mm, new_addr));
	spin_unlock(&mm->page_table_lock);
	return error;
}

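/*
 * Move 'len' bytes worth of page table entries from 'old_addr' to
 * 'new_addr', one page at a time, undoing the whole move if any
 * single page fails.
 */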
static int move_page_tables(struct mm_struct * mm,
	unsigned long new_addr, unsigned long old_addr, unsigned long len)
{
	unsigned long offset = len;

	flush_cache_range(mm, old_addr, old_addr + len);

	/*
	 * This is not the clever way to do this, but we're taking the
	 * easy way out on the assumption that most remappings will be
	 * only a few pages.. This also makes error recovery easier.
	 */
	while (offset) {
		offset -= PAGE_SIZE;
		if (move_one_page(mm, old_addr + offset, new_addr + offset))
			goto oops_we_failed;
	}
	flush_tlb_range(mm, old_addr, old_addr + len);
	return 0;

	/*
	 * Ok, the move failed because we didn't have enough pages for
	 * the new page table tree. This is unlikely, but we have to
	 * take the possibility into account. In that case we just move
	 * all the pages back (this will work, because we still have
	 * the old page tables)
	 */
oops_we_failed:
	flush_cache_range(mm, new_addr, new_addr + len);
	while ((offset += PAGE_SIZE) < len)
		move_one_page(mm, new_addr + offset, old_addr + offset);
	zap_page_range(mm, new_addr, len);
	return -1;
}

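/*
 * Set up a vma for the new location (merging with an adjacent vma
 * where possible, otherwise allocating a fresh vm_area_struct) and
 * move the page table entries across.  Returns the new address on
 * success and -ENOMEM on failure.
 */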
static inline unsigned long move_vma(struct vm_area_struct * vma,
	unsigned long addr, unsigned long old_len, unsigned long new_len,
	unsigned long new_addr)
{
	struct mm_struct * mm = vma->vm_mm;
	struct vm_area_struct * new_vma, * next, * prev;
	int allocated_vma;

	new_vma = NULL;
	next = find_vma_prev(mm, new_addr, &prev);
	if (next) {
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
			if (next != prev->vm_next)
				BUG();
			if (prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags)) {
				spin_lock(&mm->page_table_lock);
				prev->vm_end = next->vm_end;
				__vma_unlink(mm, next, prev);
				spin_unlock(&mm->page_table_lock);

				mm->map_count--;
				kmem_cache_free(vm_area_cachep, next);
			}
		} else if (next->vm_start == new_addr + new_len &&
			can_vma_merge(next, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			next->vm_start = new_addr;
			spin_unlock(&mm->page_table_lock);
			new_vma = next;
		}
	} else {
		prev = find_vma(mm, new_addr-1);
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
		}
	}

	allocated_vma = 0;
	if (!new_vma) {
		new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!new_vma)
			goto out;
		allocated_vma = 1;
	}

	if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
		if (allocated_vma) {
			*new_vma = *vma;
			new_vma->vm_start = new_addr;
			new_vma->vm_end = new_addr+new_len;
			new_vma->vm_pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
			new_vma->vm_raend = 0;
			if (new_vma->vm_file)
				get_file(new_vma->vm_file);
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			insert_vm_struct(current->mm, new_vma);
		}
		do_munmap(current->mm, addr, old_len);
		current->mm->total_vm += new_len >> PAGE_SHIFT;
		if (new_vma->vm_flags & VM_LOCKED) {
			current->mm->locked_vm += new_len >> PAGE_SHIFT;
			make_pages_present(new_vma->vm_start,
					   new_vma->vm_end);
		}
		return new_addr;
	}
	if (allocated_vma)
		kmem_cache_free(vm_area_cachep, new_vma);
out:
	return -ENOMEM;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
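/*
 * Illustrative usage sketch (not part of this file): the usual way
 * into do_mremap() is the mremap(2) system call, for example growing
 * an anonymous private mapping and letting the kernel relocate it:
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *
 * That call reaches do_mremap() below with old_len == 4096,
 * new_len == 8192 and MREMAP_MAYMOVE set, so the mapping is grown in
 * place when there is room and moved otherwise.
 */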
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/* new_addr is only valid if MREMAP_FIXED is specified */
	if (flags & MREMAP_FIXED) {
		if (new_addr & ~PAGE_MASK)
			goto out;
		if (!(flags & MREMAP_MAYMOVE))
			goto out;

		if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
			goto out;

		/* Check if the location we're moving into overlaps the
		 * old location at all, and fail if it does.
		 */
		if ((new_addr <= addr) && (new_addr+new_len) > addr)
			goto out;

		if ((addr <= new_addr) && (addr+old_len) > new_addr)
			goto out;

		do_munmap(current->mm, new_addr, new_len);
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 */
	ret = addr;
	if (old_len >= new_len) {
		do_munmap(current->mm, addr+new_len, old_len - new_len);
		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
			goto out;
	}

	/*
	 * Ok, we need to grow..  or relocate.
	 */
	ret = -EFAULT;
	vma = find_vma(current->mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto out;
	if (vma->vm_flags & VM_DONTEXPAND) {
		if (new_len > old_len)
			goto out;
	}
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
		locked += new_len - old_len;
		ret = -EAGAIN;
		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
			goto out;
	}
	ret = -ENOMEM;
	if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
	    > current->rlim[RLIMIT_AS].rlim_cur)
		goto out;
	/* Private writable mapping? Check memory availability.. */
	if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
	    !(flags & MAP_NORESERVE) &&
	    !vm_enough_memory((new_len - old_len) >> PAGE_SHIFT))
		goto out;

	/* old_len exactly to the end of the area..
	 * And we're not relocating the area.
	 */
	if (old_len == vma->vm_end - addr &&
	    !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
	    (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
		unsigned long max_addr = TASK_SIZE;
		if (vma->vm_next)
			max_addr = vma->vm_next->vm_start;
		/* can we just expand the current mapping? */
		if (max_addr - addr >= new_len) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;
			spin_lock(&vma->vm_mm->page_table_lock);
			vma->vm_end = addr + new_len;
			spin_unlock(&vma->vm_mm->page_table_lock);
			current->mm->total_vm += pages;
			if (vma->vm_flags & VM_LOCKED) {
				current->mm->locked_vm += pages;
				make_pages_present(addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		if (!(flags & MREMAP_FIXED)) {
			unsigned long map_flags = 0;
			if (vma->vm_flags & VM_SHARED)
				map_flags |= MAP_SHARED;

			new_addr = get_unmapped_area(vma->vm_file, 0, new_len, vma->vm_pgoff, map_flags);
			ret = new_addr;
			if (new_addr & ~PAGE_MASK)
				goto out;
		}
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	return ret;
}

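/*
 * The mremap(2) system call entry point: do_mremap() is called with
 * the mm's mmap semaphore held for writing.
 */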
asmlinkage unsigned long sys_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}