X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;ds=sidebyside;f=mm%2Fmmap.c;h=88da687bde89fdc51b367c1728d330b3321568ab;hb=14e072984179d3d421bf9ab75cc67e0961742841;hp=cc3a208194576482bb6f94223ba522c35daeed14;hpb=190ff5b3a168b666925897558998b5d97fec8731;p=powerpc.git

diff --git a/mm/mmap.c b/mm/mmap.c
index cc3a208194..88da687bde 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -29,6 +29,7 @@
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
+#include <asm/mmu_context.h>
 
 #ifndef arch_mmap_check
 #define arch_mmap_check(addr, len, flags)	(0)
@@ -299,6 +300,8 @@ static int browse_rb(struct rb_root *root)
 			printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
 		i++;
 		pn = nd;
+		prev = vma->vm_start;
+		pend = vma->vm_end;
 	}
 	j = 0;
 	for (nd = pn; nd; nd = rb_prev(nd)) {
@@ -1977,6 +1980,9 @@ void exit_mmap(struct mm_struct *mm)
 	unsigned long nr_accounted = 0;
 	unsigned long end;
 
+	/* mm's last user has gone, and its about to be pulled down */
+	arch_exit_mmap(mm);
+
 	lru_add_drain();
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
@@ -2101,3 +2107,75 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
 		return 0;
 	return 1;
 }
+
+
+static struct page *special_mapping_nopage(struct vm_area_struct *vma,
+					   unsigned long address, int *type)
+{
+	struct page **pages;
+
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+
+	address -= vma->vm_start;
+	for (pages = vma->vm_private_data; address > 0 && *pages; ++pages)
+		address -= PAGE_SIZE;
+
+	if (*pages) {
+		struct page *page = *pages;
+		get_page(page);
+		return page;
+	}
+
+	return NOPAGE_SIGBUS;
+}
+
+/*
+ * Having a close hook prevents vma merging regardless of flags.
+ */
+static void special_mapping_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct special_mapping_vmops = {
+	.close = special_mapping_close,
+	.nopage	= special_mapping_nopage,
+};
+
+/*
+ * Called with mm->mmap_sem held for writing.
+ * Insert a new vma covering the given region, with the given flags.
+ * Its pages are supplied by the given array of struct page *.
+ * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
+ * The region past the last page supplied will always produce SIGBUS.
+ * The array pointer and the pages it points to are assumed to stay alive
+ * for as long as this mapping might exist.
+ */
+int install_special_mapping(struct mm_struct *mm,
+			    unsigned long addr, unsigned long len,
+			    unsigned long vm_flags, struct page **pages)
+{
+	struct vm_area_struct *vma;
+
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	if (unlikely(vma == NULL))
+		return -ENOMEM;
+
+	vma->vm_mm = mm;
+	vma->vm_start = addr;
+	vma->vm_end = addr + len;
+
+	vma->vm_flags = vm_flags | mm->def_flags;
+	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+
+	vma->vm_ops = &special_mapping_vmops;
+	vma->vm_private_data = pages;
+
+	if (unlikely(insert_vm_struct(mm, vma))) {
+		kmem_cache_free(vm_area_cachep, vma);
+		return -ENOMEM;
+	}
+
+	mm->total_vm += len >> PAGE_SHIFT;
+
+	return 0;
+}
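
Two of the hunks above deserve a note. The new arch_exit_mmap() call in exit_mmap() gives an architecture a hook that runs once the mm's last user is gone, before the address space is torn down; paravirtualized guests use it to detach from the hypervisor, and everyone else supplies a no-op. A minimal sketch of such a fallback, modeled on an asm-generic-style header (the exact header name here is an assumption, not shown by this diff):

	/* Sketch: no-op fallback for architectures that don't need the hook;
	 * assumed to live in something like include/asm-generic/mm_hooks.h. */
	static inline void arch_exit_mmap(struct mm_struct *mm)
	{
	}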
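
The other visible piece is install_special_mapping(), which builds a vma backed by a fixed, kernel-owned page array. A hedged usage sketch (not part of this patch; vdso_pages, VDSO_BASE, and map_vdso_page() are illustrative names) showing a caller mapping a single read/execute page, taking mmap_sem for writing as the function's comment requires:

	/* Illustrative caller, not from this patch: map one kernel-owned page. */
	static struct page *vdso_pages[2];	/* [0] filled at init, [1] = NULL terminator */

	static int map_vdso_page(struct mm_struct *mm)
	{
		int ret;

		/* install_special_mapping() expects mmap_sem held for writing */
		down_write(&mm->mmap_sem);
		ret = install_special_mapping(mm, VDSO_BASE, PAGE_SIZE,
					      VM_READ | VM_EXEC |
					      VM_MAYREAD | VM_MAYEXEC,
					      vdso_pages);
		up_write(&mm->mmap_sem);
		return ret;
	}

Because the page array is NULL-terminated, it may be shorter than len >> PAGE_SHIFT; a fault beyond the last supplied page falls through to special_mapping_nopage()'s NOPAGE_SIGBUS return, exactly as the comment block in the patch states.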