6 #include <linux/slab.h>
8 #include <linux/mman.h>
9 #include <linux/pagemap.h>
10 #include <linux/swap.h>
11 #include <linux/swapctl.h>
12 #include <linux/smp_lock.h>
13 #include <linux/init.h>
14 #include <linux/file.h>
16 #include <linux/personality.h>
18 #include <asm/uaccess.h>
19 #include <asm/pgalloc.h>
22 * WARNING: the debugging will use recursive algorithms so never enable this
23 * unless you know what you are doing.
27 /* description of effects of mapping type and prot in current implementation.
28 * this is due to the limited x86 page protection hardware. The expected
29 * behavior is in parens:
32 * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
33 * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
34 * w: (no) no w: (no) no w: (yes) yes w: (no) no
35 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
37 * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
38 * w: (no) no w: (no) no w: (copy) copy w: (no) no
39 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
42 pgprot_t protection_map[16] = {
43 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
44 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
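/*
 * (Editor's illustration, not part of the source.)  The table is indexed by
 * the low four vm_flags bits -- VM_READ (0x1), VM_WRITE (0x2), VM_EXEC (0x4)
 * and VM_SHARED (0x8) -- so entries 0-7 are the __P (private, copy-on-write)
 * protections and entries 8-15 the __S (shared) ones.  do_mmap_pgoff() and
 * do_brk() below perform the lookup as
 *
 *	vma->vm_page_prot = protection_map[vm_flags & 0x0f];
 *
 * e.g. a MAP_PRIVATE mapping with PROT_READ|PROT_WRITE gets __P011, which the
 * table above documents as readable but only copy-on-write writable.
 */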
47 int sysctl_overcommit_memory;
48 int max_map_count = DEFAULT_MAX_MAP_COUNT;
50 /* Check that a process has enough memory to allocate a
51 * new virtual mapping.
53 int vm_enough_memory(long pages)
55 /* Stupid algorithm to decide if we have enough memory: while
56 * simple, it hopefully works in most obvious cases.. Easy to
57 * fool it, but this should catch most mistakes.
59 /* 23/11/98 NJC: Somewhat less stupid version of algorithm,
60 * which tries to do "TheRightThing". Instead of using half of
61 * (buffers+cache), use the minimum values. Allow an extra 2%
62 * of num_physpages for safety margin.
67 /* Sometimes we want to use more memory than we have. */
68 if (sysctl_overcommit_memory)
71 /* The page cache contains buffer pages these days.. */
72 free = atomic_read(&page_cache_size);
73 free += nr_free_pages();
74 free += nr_swap_pages;
77 * This double-counts: the nrpages are both in the page-cache
78 * and in the swapper space. At the same time, this compensates
79 * for the swap-space over-allocation (ie "nr_swap_pages" being too small).
82 free += swapper_space.nrpages;
85 * The code below doesn't account for free space in the inode
86 * and dentry slab cache, slab cache fragmentation, inodes and
87 * dentries which will become freeable under VM load, etc.
88 * Let's just hope all these (complex) factors balance out...
90 free += (dentry_stat.nr_unused * sizeof(struct dentry)) >> PAGE_SHIFT;
91 free += (inodes_stat.nr_unused * sizeof(struct inode)) >> PAGE_SHIFT;
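/*
 * (Editor's note, not part of the source.)  The elided tail of this function
 * simply compares the estimate built up above against the request -- roughly
 * "return free > pages;" -- so callers treat a zero return as "this would
 * overcommit".  Both do_mmap_pgoff() and do_brk() below use the pattern
 *
 *	if (!vm_enough_memory(len >> PAGE_SHIFT))
 *		return -ENOMEM;
 */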
96 /* Remove one vm structure from the inode's i_mapping address space. */
97 static inline void __remove_shared_vm_struct(struct vm_area_struct *vma)
99 struct file * file = vma->vm_file;
102 struct inode *inode = file->f_dentry->d_inode;
103 if (vma->vm_flags & VM_DENYWRITE)
104 atomic_inc(&inode->i_writecount);
105 if(vma->vm_next_share)
106 vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
107 *vma->vm_pprev_share = vma->vm_next_share;
111 static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
113 lock_vma_mappings(vma);
114 __remove_shared_vm_struct(vma);
115 unlock_vma_mappings(vma);
118 void lock_vma_mappings(struct vm_area_struct *vma)
120 struct address_space *mapping;
124 mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
126 spin_lock(&mapping->i_shared_lock);
129 void unlock_vma_mappings(struct vm_area_struct *vma)
131 struct address_space *mapping;
135 mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
137 spin_unlock(&mapping->i_shared_lock);
141 * sys_brk() for the most part doesn't need the global kernel
142 * lock, except when an application is doing something nasty
143 * like trying to un-brk an area that has already been mapped
144 * to a regular file. in this case, the unmapping will need
145 * to invoke file system routines that need the global lock.
147 asmlinkage unsigned long sys_brk(unsigned long brk)
149 unsigned long rlim, retval;
150 unsigned long newbrk, oldbrk;
151 struct mm_struct *mm = current->mm;
153 down_write(&mm->mmap_sem);
155 if (brk < mm->end_code)
157 newbrk = PAGE_ALIGN(brk);
158 oldbrk = PAGE_ALIGN(mm->brk);
159 if (oldbrk == newbrk)
162 /* Always allow shrinking brk. */
163 if (brk <= mm->brk) {
164 if (!do_munmap(mm, newbrk, oldbrk-newbrk))
169 /* Check against rlimit.. */
170 rlim = current->rlim[RLIMIT_DATA].rlim_cur;
171 if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
174 /* Check against existing mmap mappings. */
175 if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
178 /* Check if we have enough memory.. */
179 if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
182 /* Ok, looks good - let it rip. */
183 if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
189 up_write(&mm->mmap_sem);
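/*
 * (Editor's example, not part of the source.)  Seen from userspace this is
 * the syscall behind malloc()'s heap growth; a minimal sketch:
 *
 *	#include <unistd.h>
 *
 *	void *old = sbrk(0);			// current break
 *	if (sbrk(4096) != (void *)-1)		// grow the heap by one page
 *		brk(old);			// and shrink it back again
 *
 * Growing ends up in do_brk() below (an anonymous, brk-style mapping);
 * shrinking goes through do_munmap().
 */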
193 /* Combine the mmap "prot" and "flags" argument into one "vm_flags" used
194 * internally. Essentially, translate the "PROT_xxx" and "MAP_xxx" bits
195 * down into "VM_xxx" flags.
197 static inline unsigned long calc_vm_flags(unsigned long prot, unsigned long flags)
199 #define _trans(x,bit1,bit2) \
200 ((bit1==bit2)?(x&bit1):(x&bit1)?bit2:0)
202 unsigned long prot_bits, flag_bits;
203 prot_bits =
204 _trans(prot, PROT_READ, VM_READ) |
205 _trans(prot, PROT_WRITE, VM_WRITE) |
206 _trans(prot, PROT_EXEC, VM_EXEC);
207 flag_bits =
208 _trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
209 _trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
210 _trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
211 return prot_bits | flag_bits;
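/*
 * (Editor's illustration.)  For a common private read/write request the
 * translation above gives
 *
 *	calc_vm_flags(PROT_READ | PROT_WRITE, MAP_PRIVATE)
 *		== VM_READ | VM_WRITE
 *
 * i.e. no VM_SHARED bit; MAP_GROWSDOWN, MAP_DENYWRITE and MAP_EXECUTABLE are
 * the only MAP_* flags that carry over here.  do_mmap_pgoff() then ORs in
 * mm->def_flags and the VM_MAY* bits.
 */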
216 static int browse_rb(rb_node_t * rb_node) {
220 i += browse_rb(rb_node->rb_left);
221 i += browse_rb(rb_node->rb_right);
226 static void validate_mm(struct mm_struct * mm) {
229 struct vm_area_struct * tmp = mm->mmap;
234 if (i != mm->map_count)
235 printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
236 i = browse_rb(mm->mm_rb.rb_node);
237 if (i != mm->map_count)
238 printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
243 #define validate_mm(mm) do { } while (0)
246 static struct vm_area_struct * find_vma_prepare(struct mm_struct * mm, unsigned long addr,
247 struct vm_area_struct ** pprev,
248 rb_node_t *** rb_link, rb_node_t ** rb_parent)
250 struct vm_area_struct * vma;
251 rb_node_t ** __rb_link, * __rb_parent, * rb_prev;
253 __rb_link = &mm->mm_rb.rb_node;
254 rb_prev = __rb_parent = NULL;
258 struct vm_area_struct *vma_tmp;
260 __rb_parent = *__rb_link;
261 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
263 if (vma_tmp->vm_end > addr) {
265 if (vma_tmp->vm_start <= addr)
267 __rb_link = &__rb_parent->rb_left;
269 rb_prev = __rb_parent;
270 __rb_link = &__rb_parent->rb_right;
276 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
277 *rb_link = __rb_link;
278 *rb_parent = __rb_parent;
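/*
 * (Editor's sketch of the caller pattern -- compare do_mmap_pgoff() and
 * do_brk() below.)  find_vma_prepare() returns the first vma with
 * vm_end > addr (or NULL) and remembers the rbtree position, so a caller can
 * clear the range and then link a new vma without walking the tree again:
 *
 *	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
 *	if (vma && vma->vm_start < addr + len)
 *		do_munmap(mm, addr, len);	// then look the slot up again
 *	...
 *	vma_link(mm, new_vma, prev, rb_link, rb_parent);
 */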
282 static inline void __vma_link_list(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
283 rb_node_t * rb_parent)
286 vma->vm_next = prev->vm_next;
291 vma->vm_next = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
297 static inline void __vma_link_rb(struct mm_struct * mm, struct vm_area_struct * vma,
298 rb_node_t ** rb_link, rb_node_t * rb_parent)
300 rb_link_node(&vma->vm_rb, rb_parent, rb_link);
301 rb_insert_color(&vma->vm_rb, &mm->mm_rb);
304 static inline void __vma_link_file(struct vm_area_struct * vma)
310 struct inode * inode = file->f_dentry->d_inode;
311 struct address_space *mapping = inode->i_mapping;
312 struct vm_area_struct **head;
314 if (vma->vm_flags & VM_DENYWRITE)
315 atomic_dec(&inode->i_writecount);
317 head = &mapping->i_mmap;
318 if (vma->vm_flags & VM_SHARED)
319 head = &mapping->i_mmap_shared;
321 /* insert vma into inode's share list */
322 if((vma->vm_next_share = *head) != NULL)
323 (*head)->vm_pprev_share = &vma->vm_next_share;
325 vma->vm_pprev_share = head;
329 static void __vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
330 rb_node_t ** rb_link, rb_node_t * rb_parent)
332 __vma_link_list(mm, vma, prev, rb_parent);
333 __vma_link_rb(mm, vma, rb_link, rb_parent);
334 __vma_link_file(vma);
337 static inline void vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
338 rb_node_t ** rb_link, rb_node_t * rb_parent)
340 lock_vma_mappings(vma);
341 spin_lock(&mm->page_table_lock);
342 __vma_link(mm, vma, prev, rb_link, rb_parent);
343 spin_unlock(&mm->page_table_lock);
344 unlock_vma_mappings(vma);
350 static int vma_merge(struct mm_struct * mm, struct vm_area_struct * prev,
351 rb_node_t * rb_parent, unsigned long addr, unsigned long end, unsigned long vm_flags)
353 spinlock_t * lock = &mm->page_table_lock;
355 prev = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
358 if (prev->vm_end == addr && can_vma_merge(prev, vm_flags)) {
359 struct vm_area_struct * next;
363 next = prev->vm_next;
364 if (next && prev->vm_end == next->vm_start && can_vma_merge(next, vm_flags)) {
365 prev->vm_end = next->vm_end;
366 __vma_unlink(mm, next, prev);
370 kmem_cache_free(vm_area_cachep, next);
377 prev = prev->vm_next;
380 if (!can_vma_merge(prev, vm_flags))
382 if (end == prev->vm_start) {
384 prev->vm_start = addr;
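/*
 * (Editor's illustration of the two merge cases handled above.)
 *
 *   case 1: prev->vm_end == addr
 *	|---- prev ----|<--- new --->|---- next ----|
 *	prev is simply grown to "end"; if that makes it touch a mergeable
 *	"next" (prev->vm_end == next->vm_start), next is unlinked and freed too.
 *
 *   case 2: end == following vma's vm_start
 *	<--- new --->|---- vma ----|
 *	that vma's vm_start is pulled back to "addr".
 *
 * Only vmas that can_vma_merge() accepts -- anonymous (no vm_file) and with
 * identical vm_flags -- are candidates for either case.
 */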
393 unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned long len,
394 unsigned long prot, unsigned long flags, unsigned long pgoff)
396 struct mm_struct * mm = current->mm;
397 struct vm_area_struct * vma, * prev;
398 unsigned int vm_flags;
399 int correct_wcount = 0;
401 rb_node_t ** rb_link, * rb_parent;
403 if (file && (!file->f_op || !file->f_op->mmap))
406 if ((len = PAGE_ALIGN(len)) == 0)
412 /* offset overflow? */
413 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
416 /* Too many mappings? */
417 if (mm->map_count > max_map_count)
420 /* Obtain the address to map to. we verify (or select) it and ensure
421 * that it represents a valid section of the address space.
423 addr = get_unmapped_area(file, addr, len, pgoff, flags);
424 if (addr & ~PAGE_MASK)
427 /* Do simple checking here so the lower-level routines won't have
428 * to. we assume access permissions have been handled by the open
429 * of the memory object, so we don't do any here.
431 vm_flags = calc_vm_flags(prot,flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
433 /* mlock MCL_FUTURE? */
434 if (vm_flags & VM_LOCKED) {
435 unsigned long locked = mm->locked_vm << PAGE_SHIFT;
437 if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
442 switch (flags & MAP_TYPE) {
444 if ((prot & PROT_WRITE) && !(file->f_mode & FMODE_WRITE))
447 /* Make sure we don't allow writing to an append-only file.. */
448 if (IS_APPEND(file->f_dentry->d_inode) && (file->f_mode & FMODE_WRITE))
451 /* make sure there are no mandatory locks on the file. */
452 if (locks_verify_locked(file->f_dentry->d_inode))
455 vm_flags |= VM_SHARED | VM_MAYSHARE;
456 if (!(file->f_mode & FMODE_WRITE))
457 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
461 if (!(file->f_mode & FMODE_READ))
469 vm_flags |= VM_SHARED | VM_MAYSHARE;
470 switch (flags & MAP_TYPE) {
474 vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
483 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
484 if (vma && vma->vm_start < addr + len) {
485 if (do_munmap(mm, addr, len))
490 /* Check against address space limit. */
491 if ((mm->total_vm << PAGE_SHIFT) + len
492 > current->rlim[RLIMIT_AS].rlim_cur)
495 /* Private writable mapping? Check memory availability.. */
496 if ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE &&
497 !(flags & MAP_NORESERVE) &&
498 !vm_enough_memory(len >> PAGE_SHIFT))
501 /* Can we just expand an old anonymous mapping? */
502 if (!file && !(vm_flags & VM_SHARED) && rb_parent)
503 if (vma_merge(mm, prev, rb_parent, addr, addr + len, vm_flags))
506 /* Determine the object being mapped and call the appropriate
507 * specific mapper. The address has already been validated, and any
508 * old mappings in the range have already been removed above.
510 vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
515 vma->vm_start = addr;
516 vma->vm_end = addr + len;
517 vma->vm_flags = vm_flags;
518 vma->vm_page_prot = protection_map[vm_flags & 0x0f];
520 vma->vm_pgoff = pgoff;
522 vma->vm_private_data = NULL;
527 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
529 if (vm_flags & VM_DENYWRITE) {
530 error = deny_write_access(file);
537 error = file->f_op->mmap(file, vma);
539 goto unmap_and_free_vma;
540 } else if (flags & MAP_SHARED) {
541 error = shmem_zero_setup(vma);
546 /* Can addr have changed??
548 * Answer: Yes, several device drivers can do it in their
549 * f_op->mmap method. -DaveM
551 if (addr != vma->vm_start) {
553 * It is a bit too late to pretend changing the virtual
554 * area of the mapping, we just corrupted userspace
555 * in the do_munmap, so FIXME (not in 2.4 to avoid breaking the driver API).
558 struct vm_area_struct * stale_vma;
559 /* Since addr changed, we rely on the mmap op to prevent
560 * collisions with existing vmas and just use find_vma_prepare
561 * to update the tree pointers.
563 addr = vma->vm_start;
564 stale_vma = find_vma_prepare(mm, addr, &prev,
565 &rb_link, &rb_parent);
567 * Make sure the low-level driver did its job right.
569 if (unlikely(stale_vma && stale_vma->vm_start < vma->vm_end)) {
570 printk(KERN_ERR "buggy mmap operation: [<%p>]\n",
571 file ? file->f_op->mmap : NULL);
576 vma_link(mm, vma, prev, rb_link, rb_parent);
578 atomic_inc(&file->f_dentry->d_inode->i_writecount);
581 mm->total_vm += len >> PAGE_SHIFT;
582 if (vm_flags & VM_LOCKED) {
583 mm->locked_vm += len >> PAGE_SHIFT;
584 make_pages_present(addr, addr + len);
590 atomic_inc(&file->f_dentry->d_inode->i_writecount);
594 /* Undo any partial mapping done by a device driver. */
595 zap_page_range(mm, vma->vm_start, vma->vm_end - vma->vm_start);
597 kmem_cache_free(vm_area_cachep, vma);
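/*
 * (Editor's sketch, not part of the source.)  do_mmap_pgoff() must be called
 * with mm->mmap_sem held for writing; the arch mmap/mmap2 syscall wrappers
 * follow roughly this shape:
 *
 *	down_write(&current->mm->mmap_sem);
 *	addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 *	up_write(&current->mm->mmap_sem);
 *
 * As with get_unmapped_area(), a return value with low bits set is really a
 * negative errno rather than a mapping address.
 */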
601 /* Get an address range which is currently unmapped.
602 * For shmat() with addr=0.
604 * Ugly calling convention alert:
605 * Return value with the low bits set means error value, ie
607 *	if (ret & ~PAGE_MASK)
608 *		error = ret;
610 * This function "knows" that -ENOMEM has the bits set.
612 #ifndef HAVE_ARCH_UNMAPPED_AREA
613 static inline unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
615 struct vm_area_struct *vma;
621 addr = PAGE_ALIGN(addr);
622 vma = find_vma(current->mm, addr);
623 if (TASK_SIZE - len >= addr &&
624 (!vma || addr + len <= vma->vm_start))
627 addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
629 for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
630 /* At this point: (!vma || addr < vma->vm_end). */
631 if (TASK_SIZE - len < addr)
633 if (!vma || addr + len <= vma->vm_start)
639 extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
642 unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
644 if (flags & MAP_FIXED) {
645 if (addr > TASK_SIZE - len)
647 if (addr & ~PAGE_MASK)
652 if (file && file->f_op && file->f_op->get_unmapped_area)
653 return file->f_op->get_unmapped_area(file, addr, len, pgoff, flags);
655 return arch_get_unmapped_area(file, addr, len, pgoff, flags);
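/*
 * (Editor's illustration of the convention documented above.)  Callers must
 * distinguish an address from an error by checking the low bits, exactly as
 * do_mmap_pgoff() does:
 *
 *	addr = get_unmapped_area(file, addr, len, pgoff, flags);
 *	if (addr & ~PAGE_MASK)
 *		return addr;	// actually a negative errno such as -ENOMEM
 */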
658 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
659 struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
661 struct vm_area_struct *vma = NULL;
664 /* Check the cache first. */
665 /* (Cache hit rate is typically around 35%.) */
666 vma = mm->mmap_cache;
667 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
670 rb_node = mm->mm_rb.rb_node;
674 struct vm_area_struct * vma_tmp;
676 vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
678 if (vma_tmp->vm_end > addr) {
680 if (vma_tmp->vm_start <= addr)
682 rb_node = rb_node->rb_left;
684 rb_node = rb_node->rb_right;
687 mm->mmap_cache = vma;
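/*
 * (Editor's sketch of the usual caller pattern.)  Because find_vma() returns
 * the first vma with vm_end > addr, a non-NULL result does not yet mean addr
 * is mapped; the caller still has to check vm_start, e.g.:
 *
 *	vma = find_vma(mm, addr);
 *	if (!vma || vma->vm_start > addr)
 *		... addr is in a hole (unmapped, or below the first vma) ...
 */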
693 /* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
694 struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
695 struct vm_area_struct **pprev)
698 /* Go through the RB tree quickly. */
699 struct vm_area_struct * vma;
700 rb_node_t * rb_node, * rb_last_right, * rb_prev;
702 rb_node = mm->mm_rb.rb_node;
703 rb_last_right = rb_prev = NULL;
707 struct vm_area_struct * vma_tmp;
709 vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
711 if (vma_tmp->vm_end > addr) {
713 rb_prev = rb_last_right;
714 if (vma_tmp->vm_start <= addr)
716 rb_node = rb_node->rb_left;
718 rb_last_right = rb_node;
719 rb_node = rb_node->rb_right;
723 if (vma->vm_rb.rb_left) {
724 rb_prev = vma->vm_rb.rb_left;
725 while (rb_prev->rb_right)
726 rb_prev = rb_prev->rb_right;
730 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
731 if ((rb_prev ? (*pprev)->vm_next : mm->mmap) != vma)
740 struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr)
742 struct vm_area_struct * vma;
746 vma = find_vma(mm,addr);
749 if (vma->vm_start <= addr)
751 if (!(vma->vm_flags & VM_GROWSDOWN))
753 start = vma->vm_start;
754 if (expand_stack(vma, addr))
756 if (vma->vm_flags & VM_LOCKED) {
757 make_pages_present(addr, start);
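/*
 * (Editor's sketch.)  This is the helper behind automatic stack growth: the
 * page fault path does essentially
 *
 *	vma = find_vma(mm, address);
 *	if (vma && vma->vm_start > address &&
 *	    (vma->vm_flags & VM_GROWSDOWN) &&
 *	    !expand_stack(vma, address))
 *		... the fault can be handled, vm_start now covers address ...
 *
 * while get_user_pages()-style callers go through find_extend_vma() itself.
 */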
762 /* Normal function to fix up a mapping
763 * This function is the default for when an area has no specific
764 * function. This may be used as part of a more specific routine.
765 * This function works out what part of an area is affected and
766 * adjusts the mapping information. Since the actual page
767 * manipulation is done in do_mmap(), none need be done here,
768 * though it would probably be more appropriate.
770 * By the time this function is called, the area struct has been
771 * removed from the process mapping list, so it needs to be
772 * reinserted if necessary.
774 * The 4 main cases are:
775 * Unmapping the whole area
776 * Unmapping from the start of the segment to a point in it
777 * Unmapping from an intermediate point to the end
778 * Unmapping between two intermediate points, making a hole.
780 * Case 4 involves the creation of 2 new areas, for each side of
781 * the hole. If possible, we reuse the existing area rather than
782 * allocate a new one, and the return value indicates whether the old
783 * area was reused.
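/*
 * (Editor's summary of the four cases, for an unmap of [addr, addr+len)
 * against an area spanning [vm_start, vm_end).)
 *
 *	addr == vm_start && end == vm_end   ->  free the whole area
 *	addr >  vm_start && end == vm_end   ->  trim the tail (vm_end = addr)
 *	addr == vm_start && end <  vm_end   ->  trim the head (vm_start = end,
 *						vm_pgoff advanced to match)
 *	addr >  vm_start && end <  vm_end   ->  punch a hole: "area" keeps the
 *						head, "extra" becomes the tail
 */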
785 static struct vm_area_struct * unmap_fixup(struct mm_struct *mm,
786 struct vm_area_struct *area, unsigned long addr, size_t len,
787 struct vm_area_struct *extra)
789 struct vm_area_struct *mpnt;
790 unsigned long end = addr + len;
792 area->vm_mm->total_vm -= len >> PAGE_SHIFT;
793 if (area->vm_flags & VM_LOCKED)
794 area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
796 /* Unmapping the whole area. */
797 if (addr == area->vm_start && end == area->vm_end) {
798 if (area->vm_ops && area->vm_ops->close)
799 area->vm_ops->close(area);
802 kmem_cache_free(vm_area_cachep, area);
806 /* Work out to one of the ends. */
807 if (end == area->vm_end) {
809 * here area isn't visible to the semaphore-less readers
810 * so we don't need to update it under the spinlock.
813 lock_vma_mappings(area);
814 spin_lock(&mm->page_table_lock);
815 } else if (addr == area->vm_start) {
816 area->vm_pgoff += (end - area->vm_start) >> PAGE_SHIFT;
817 /* same locking considerations of the above case */
818 area->vm_start = end;
819 lock_vma_mappings(area);
820 spin_lock(&mm->page_table_lock);
822 /* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
823 /* Add end mapping -- leave beginning for below */
827 mpnt->vm_mm = area->vm_mm;
828 mpnt->vm_start = end;
829 mpnt->vm_end = area->vm_end;
830 mpnt->vm_page_prot = area->vm_page_prot;
831 mpnt->vm_flags = area->vm_flags;
833 mpnt->vm_ops = area->vm_ops;
834 mpnt->vm_pgoff = area->vm_pgoff + ((end - area->vm_start) >> PAGE_SHIFT);
835 mpnt->vm_file = area->vm_file;
836 mpnt->vm_private_data = area->vm_private_data;
838 get_file(mpnt->vm_file);
839 if (mpnt->vm_ops && mpnt->vm_ops->open)
840 mpnt->vm_ops->open(mpnt);
841 area->vm_end = addr; /* Truncate area */
843 /* Because mpnt->vm_file == area->vm_file this locks
844 * things correctly.
846 lock_vma_mappings(area);
847 spin_lock(&mm->page_table_lock);
848 __insert_vm_struct(mm, mpnt);
851 __insert_vm_struct(mm, area);
852 spin_unlock(&mm->page_table_lock);
853 unlock_vma_mappings(area);
858 * Try to free as many page directory entries as we can,
859 * without having to work very hard at actually scanning
860 * the page tables themselves.
862 * Right now we try to free page tables if we have a nice
863 * PGDIR-aligned area that got free'd up. We could be more
864 * granular if we want to, but this is fast and simple,
865 * and covers the bad cases.
867 * "prev", if it exists, points to a vma before the one
868 * we just free'd - but there's no telling how much before.
870 static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
871 unsigned long start, unsigned long end)
873 unsigned long first = start & PGDIR_MASK;
874 unsigned long last = end + PGDIR_SIZE - 1;
875 unsigned long start_index, end_index;
881 if (prev->vm_end > start) {
882 if (last > prev->vm_start)
883 last = prev->vm_start;
888 struct vm_area_struct *next = prev->vm_next;
891 if (next->vm_start < start) {
895 if (last > next->vm_start)
896 last = next->vm_start;
898 if (prev->vm_end > first)
899 first = prev->vm_end + PGDIR_SIZE - 1;
904 * If the PGD bits are not consecutive in the virtual address, the
905 * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
907 start_index = pgd_index(first);
908 end_index = pgd_index(last);
909 if (end_index > start_index) {
910 clear_page_tables(mm, start_index, end_index - start_index);
911 flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
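/*
 * (Editor's worked example, assuming 2-level x86 paging with PGDIR_SHIFT 22,
 * i.e. 4MB per page directory entry.)  After do_munmap() frees
 * [0x08000000, 0x08800000), with the previous vma ending at 0x08000000 and
 * the next one starting at 0x08800000:
 *
 *	first = 0x08000000, last = 0x08800000 (clipped to next->vm_start)
 *	pgd_index(first) = 32, pgd_index(last) = 34
 *	clear_page_tables(mm, 32, 2)
 *
 * so the two page tables covering 0x08000000-0x087fffff are released, since
 * no neighbouring vma still uses them.
 */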
915 /* Munmap is split into 2 main parts -- this part which finds
916 * what needs doing, and the areas themselves, which do the
917 * work. This now handles partial unmappings.
918 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
920 int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
922 struct vm_area_struct *mpnt, *prev, **npp, *free, *extra;
924 if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
927 if ((len = PAGE_ALIGN(len)) == 0)
930 /* Check if this memory area is ok - put it on the temporary
931 * list if so.. The checks here are pretty simple --
932 * every area affected in some way (by any overlap) is put
933 * on the list. If nothing is put on, nothing is affected.
935 mpnt = find_vma_prev(mm, addr, &prev);
938 /* we have addr < mpnt->vm_end */
940 if (mpnt->vm_start >= addr+len)
943 /* If we'll make a "hole", check the vm area count limit */
944 if ((mpnt->vm_start < addr && mpnt->vm_end > addr+len)
945 && mm->map_count >= max_map_count)
949 * We may need one additional vma to fix up the mappings ...
950 * and this is the last chance for an easy error exit.
952 extra = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
956 npp = (prev ? &prev->vm_next : &mm->mmap);
958 spin_lock(&mm->page_table_lock);
959 for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
960 *npp = mpnt->vm_next;
961 mpnt->vm_next = free;
963 rb_erase(&mpnt->vm_rb, &mm->mm_rb);
965 mm->mmap_cache = NULL; /* Kill the cache. */
966 spin_unlock(&mm->page_table_lock);
968 /* Ok - we have the memory areas we should free on the 'free' list,
969 * so release them, and unmap the page range..
970 * If one of the segments is only being partially unmapped,
971 * it will put new vm_area_struct(s) into the address space.
972 * In that case we have to be careful with VM_DENYWRITE.
974 while ((mpnt = free) != NULL) {
975 unsigned long st, end, size;
976 struct file *file = NULL;
978 free = free->vm_next;
980 st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
981 end = addr+len;
982 end = end > mpnt->vm_end ? mpnt->vm_end : end;
983 size = end - st;
985 if (mpnt->vm_flags & VM_DENYWRITE &&
986 (st != mpnt->vm_start || end != mpnt->vm_end) &&
987 (file = mpnt->vm_file) != NULL) {
988 atomic_dec(&file->f_dentry->d_inode->i_writecount);
990 remove_shared_vm_struct(mpnt);
993 zap_page_range(mm, st, size);
996 * Fix the mapping, and free the old area if it wasn't reused.
998 extra = unmap_fixup(mm, mpnt, st, size, extra);
1000 atomic_inc(&file->f_dentry->d_inode->i_writecount);
1004 /* Release the extra vma struct if it wasn't used */
1006 kmem_cache_free(vm_area_cachep, extra);
1008 free_pgtables(mm, prev, addr, addr+len);
1013 asmlinkage long sys_munmap(unsigned long addr, size_t len)
1016 struct mm_struct *mm = current->mm;
1018 down_write(&mm->mmap_sem);
1019 ret = do_munmap(mm, addr, len);
1020 up_write(&mm->mmap_sem);
1025 * this is really a simplified "do_mmap". it only handles
1026 * anonymous maps. eventually we may be able to do some
1027 * brk-specific accounting here.
1029 unsigned long do_brk(unsigned long addr, unsigned long len)
1031 struct mm_struct * mm = current->mm;
1032 struct vm_area_struct * vma, * prev;
1033 unsigned long flags;
1034 rb_node_t ** rb_link, * rb_parent;
1036 len = PAGE_ALIGN(len);
1043 if (mm->def_flags & VM_LOCKED) {
1044 unsigned long locked = mm->locked_vm << PAGE_SHIFT;
1046 if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
1051 * Clear old maps. this also does some error checking for us
1054 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
1055 if (vma && vma->vm_start < addr + len) {
1056 if (do_munmap(mm, addr, len))
1061 /* Check against address space limits *after* clearing old maps... */
1062 if ((mm->total_vm << PAGE_SHIFT) + len
1063 > current->rlim[RLIMIT_AS].rlim_cur)
1066 if (mm->map_count > max_map_count)
1069 if (!vm_enough_memory(len >> PAGE_SHIFT))
1072 flags = VM_DATA_DEFAULT_FLAGS | mm->def_flags;
1074 /* Can we just expand an old anonymous mapping? */
1075 if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len, flags))
1079 * create a vma struct for an anonymous mapping
1081 vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
1086 vma->vm_start = addr;
1087 vma->vm_end = addr + len;
1088 vma->vm_flags = flags;
1089 vma->vm_page_prot = protection_map[flags & 0x0f];
1092 vma->vm_file = NULL;
1093 vma->vm_private_data = NULL;
1095 vma_link(mm, vma, prev, rb_link, rb_parent);
1098 mm->total_vm += len >> PAGE_SHIFT;
1099 if (flags & VM_LOCKED) {
1100 mm->locked_vm += len >> PAGE_SHIFT;
1101 make_pages_present(addr, addr + len);
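/*
 * (Editor's note.)  Besides sys_brk() above, the binary loaders use do_brk()
 * to map the zero-initialised bss region of a new executable, roughly
 *
 *	down_write(&current->mm->mmap_sem);
 *	do_brk(bss_start, bss_len);
 *	up_write(&current->mm->mmap_sem);
 *
 * and, as with do_mmap_pgoff(), the caller must hold mmap_sem for writing.
 */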
1106 /* Build the RB tree corresponding to the VMA list. */
1107 void build_mmap_rb(struct mm_struct * mm)
1109 struct vm_area_struct * vma;
1110 rb_node_t ** rb_link, * rb_parent;
1112 mm->mm_rb = RB_ROOT;
1113 rb_link = &mm->mm_rb.rb_node;
1115 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1116 __vma_link_rb(mm, vma, rb_link, rb_parent);
1117 rb_parent = &vma->vm_rb;
1118 rb_link = &rb_parent->rb_right;
1122 /* Release all mmaps. */
1123 void exit_mmap(struct mm_struct * mm)
1125 struct vm_area_struct * mpnt;
1127 release_segments(mm);
1128 spin_lock(&mm->page_table_lock);
1130 mm->mmap = mm->mmap_cache = NULL;
1131 mm->mm_rb = RB_ROOT;
1133 spin_unlock(&mm->page_table_lock);
1139 struct vm_area_struct * next = mpnt->vm_next;
1140 unsigned long start = mpnt->vm_start;
1141 unsigned long end = mpnt->vm_end;
1142 unsigned long size = end - start;
1145 if (mpnt->vm_ops->close)
1146 mpnt->vm_ops->close(mpnt);
1149 remove_shared_vm_struct(mpnt);
1150 zap_page_range(mm, start, size);
1152 fput(mpnt->vm_file);
1153 kmem_cache_free(vm_area_cachep, mpnt);
1157 /* This is just debugging */
1161 clear_page_tables(mm, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
1166 /* Insert vm structure into process list sorted by address
1167 * and into the inode's i_mmap ring. If vm_file is non-NULL
1168 * then the i_shared_lock must be held here.
1170 void __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
1172 struct vm_area_struct * __vma, * prev;
1173 rb_node_t ** rb_link, * rb_parent;
1175 __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
1176 if (__vma && __vma->vm_start < vma->vm_end)
1178 __vma_link(mm, vma, prev, rb_link, rb_parent);
1183 void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
1185 struct vm_area_struct * __vma, * prev;
1186 rb_node_t ** rb_link, * rb_parent;
1188 __vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
1189 if (__vma && __vma->vm_start < vma->vm_end)
1191 vma_link(mm, vma, prev, rb_link, rb_parent);