/*
 *	linux/mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
extern int vm_enough_memory(long pages);
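
/*
 * Look up the pte mapping "addr", walking pgd -> pmd -> pte.  Returns
 * NULL if any level of the page table tree is missing or bad, or if
 * no pte is installed for the address.
 */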
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto end;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		goto end;
	}
	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd))
		goto end;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto end;
	}
	pte = pte_offset(pmd, addr);
	if (pte_none(*pte))
		pte = NULL;
end:
	return pte;
}
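
/*
 * Make sure a pte page exists for "addr", allocating the pmd and pte
 * levels as needed.  The allocation may temporarily drop
 * mm->page_table_lock, which is why the caller re-fetches the source
 * pte afterwards.
 */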
static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pmd_t * pmd;
	pte_t * pte = NULL;

	pmd = pmd_alloc(mm, pgd_offset(mm, addr), addr);
	if (pmd)
		pte = pte_alloc(mm, pmd, addr);
	return pte;
}
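
/*
 * Move a single pte: clear the source entry and install its value at
 * the destination.  If no destination pte was allocated, the entry is
 * put back where it came from and an error is returned.
 */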
static inline int copy_one_pte(struct mm_struct *mm, pte_t * src, pte_t * dst)
{
	int error = 0;
	pte_t pte;

	if (!pte_none(*src)) {
		pte = ptep_get_and_clear(src);
		if (!dst) {
			/* No dest?  We must put it back. */
			dst = src;
			error++;
		}
		set_pte(dst, pte);
	}
	return error;
}
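
/*
 * Move the mapping of one page from old_addr to new_addr, under
 * mm->page_table_lock.  Returns nonzero on failure.
 */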
static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
	int error = 0;
	pte_t * src, * dst;

	spin_lock(&mm->page_table_lock);
	src = get_one_pte(mm, old_addr);
	if (src) {
		dst = alloc_one_pte(mm, new_addr);
		/* the allocation may have dropped the lock: look src up again */
		src = get_one_pte(mm, old_addr);
		if (src)
			error = copy_one_pte(mm, src, dst);
	}
	spin_unlock(&mm->page_table_lock);
	return error;
}
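
/*
 * Move "len" bytes worth of ptes from old_addr to new_addr, one page
 * at a time, undoing the whole move if any single page fails.
 */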
static int move_page_tables(struct mm_struct * mm,
	unsigned long new_addr, unsigned long old_addr, unsigned long len)
{
	unsigned long offset = len;

	flush_cache_range(mm, old_addr, old_addr + len);

	/*
	 * This is not the clever way to do this, but we're taking the
	 * easy way out on the assumption that most remappings will be
	 * only a few pages.. This also makes error recovery easier.
	 */
	while (offset) {
		offset -= PAGE_SIZE;
		if (move_one_page(mm, old_addr + offset, new_addr + offset))
			goto oops_we_failed;
	}
	flush_tlb_range(mm, old_addr, old_addr + len);
	return 0;

	/*
	 * Ok, the move failed because we didn't have enough pages for
	 * the new page table tree. This is unlikely, but we have to
	 * take the possibility into account. In that case we just move
	 * all the pages back (this will work, because we still have
	 * the old page tables)
	 */
oops_we_failed:
	flush_cache_range(mm, new_addr, new_addr + len);
	while ((offset += PAGE_SIZE) < len)
		move_one_page(mm, new_addr + offset, old_addr + offset);
	zap_page_range(mm, new_addr, len);
	return -1;
}
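
/*
 * Move an entire vma to new_addr: first try to extend an adjacent vma
 * over the new range, otherwise allocate a fresh vm_area_struct, then
 * move the page tables and unmap the old range.  Returns the new
 * address on success, -ENOMEM on failure.
 */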
static inline unsigned long move_vma(struct vm_area_struct * vma,
	unsigned long addr, unsigned long old_len, unsigned long new_len,
	unsigned long new_addr)
{
	struct mm_struct * mm = vma->vm_mm;
	struct vm_area_struct * new_vma, * next, * prev;
	int allocated_vma;

	new_vma = NULL;
	next = find_vma_prev(mm, new_addr, &prev);
	if (next) {
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
			if (next != prev->vm_next)
				BUG();
			if (prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags)) {
				spin_lock(&mm->page_table_lock);
				prev->vm_end = next->vm_end;
				__vma_unlink(mm, next, prev);
				spin_unlock(&mm->page_table_lock);

				mm->map_count--;
				kmem_cache_free(vm_area_cachep, next);
			}
		} else if (next->vm_start == new_addr + new_len &&
			   can_vma_merge(next, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			next->vm_start = new_addr;
			spin_unlock(&mm->page_table_lock);
			new_vma = next;
		}
	} else {
		prev = find_vma(mm, new_addr-1);
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
		}
	}

	allocated_vma = 0;
	if (!new_vma) {
		new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!new_vma)
			goto out;
		allocated_vma = 1;
	}

	if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
		unsigned long vm_locked = vma->vm_flags & VM_LOCKED;

		if (allocated_vma) {
			*new_vma = *vma;
			new_vma->vm_start = new_addr;
			new_vma->vm_end = new_addr+new_len;
			new_vma->vm_pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
			new_vma->vm_raend = 0;
			if (new_vma->vm_file)
				get_file(new_vma->vm_file);
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			insert_vm_struct(current->mm, new_vma);
		}

		/* XXX: possible errors masked, mapping might remain */
		do_munmap(current->mm, addr, old_len);

		current->mm->total_vm += new_len >> PAGE_SHIFT;
		if (vm_locked) {
			current->mm->locked_vm += new_len >> PAGE_SHIFT;
			if (new_len > old_len)
				make_pages_present(new_addr + old_len,
						   new_addr + new_len);
		}
		return new_addr;
	}
	if (allocated_vma)
		kmem_cache_free(vm_area_cachep, new_vma);
out:
	return -ENOMEM;
}
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
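
/*
 * A minimal sketch of how this is reached from userspace (illustrative
 * example, not part of this file): the mremap(2) wrapper ends up in
 * sys_mremap() below, e.g.
 *
 *	void *p = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE,
 *		       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 *	p = mremap(p, PAGE_SIZE, 2*PAGE_SIZE, MREMAP_MAYMOVE);
 *
 * The mapping is grown in place when the pages after it are free, and
 * otherwise (since MREMAP_MAYMOVE is set) moved to a fresh address.
 */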
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	if (old_len > TASK_SIZE || addr > TASK_SIZE - old_len)
		goto out;

	if (addr >= TASK_SIZE)
		goto out;

	/* new_addr is only valid if MREMAP_FIXED is specified */
	if (flags & MREMAP_FIXED) {
		if (new_addr & ~PAGE_MASK)
			goto out;
		if (!(flags & MREMAP_MAYMOVE))
			goto out;

		if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
			goto out;

		if (new_addr >= TASK_SIZE)
			goto out;

		/*
		 * Allow new_len == 0 only if new_addr == addr
		 * to preserve truncation in place (that was working
		 * safe and some app may depend on it).
		 */
		if (unlikely(!new_len && new_addr != addr))
			goto out;

		/* Check if the location we're moving into overlaps the
		 * old location at all, and fail if it does.
		 */
		if ((new_addr <= addr) && (new_addr+new_len) > addr)
			goto out;

		if ((addr <= new_addr) && (addr+old_len) > new_addr)
			goto out;
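
		/* E.g. with addr == 0x1000 and old_len == 0x2000 the old
		 * range is [0x1000, 0x3000): new_addr == 0x2000 fails the
		 * second test above, new_addr == 0x3000 is accepted. */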

		ret = do_munmap(current->mm, new_addr, new_len);
		if (ret && new_len)
			goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 */
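	/* (e.g. mremap(p, 2*PAGE_SIZE, PAGE_SIZE, 0) simply frees the
	 * second page and returns the mapping's address unchanged) */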
	if (old_len >= new_len) {
		ret = do_munmap(current->mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
			goto out;
	}

	/*
	 * Ok, we need to grow..  or relocate.
	 */
	ret = -EFAULT;
	vma = find_vma(current->mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto out;
	if (vma->vm_flags & VM_DONTEXPAND) {
		if (new_len > old_len)
			goto out;
	}
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
		locked += new_len - old_len;
		ret = -EAGAIN;
		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
			goto out;
	}
	ret = -ENOMEM;
	if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
	    > current->rlim[RLIMIT_AS].rlim_cur)
		goto out;
	/* Private writable mapping? Check memory availability.. */
	if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
	    !(flags & MAP_NORESERVE) &&
	    !vm_enough_memory((new_len - old_len) >> PAGE_SHIFT))
		goto out;

	/* old_len exactly to the end of the area..
	 * And we're not relocating the area.
	 */
	if (old_len == vma->vm_end - addr &&
	    !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
	    (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
		unsigned long max_addr = TASK_SIZE;
		if (vma->vm_next)
			max_addr = vma->vm_next->vm_start;
		/* can we just expand the current mapping? */
		if (max_addr - addr >= new_len) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;
			spin_lock(&vma->vm_mm->page_table_lock);
			vma->vm_end = addr + new_len;
			spin_unlock(&vma->vm_mm->page_table_lock);
			current->mm->total_vm += pages;
			if (vma->vm_flags & VM_LOCKED) {
				current->mm->locked_vm += pages;
				make_pages_present(addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		if (!(flags & MREMAP_FIXED)) {
			unsigned long map_flags = 0;
			if (vma->vm_flags & VM_SHARED)
				map_flags |= MAP_SHARED;

			new_addr = get_unmapped_area(vma->vm_file, 0, new_len, vma->vm_pgoff, map_flags);
			ret = new_addr;
			if (new_addr & ~PAGE_MASK)
				goto out;
		}
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	return ret;
}
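
/*
 * The syscall entry point: do_mremap() expects current->mm->mmap_sem
 * to be held for writing.
 */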
asmlinkage unsigned long sys_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}