X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=mm%2Fmemory.c;h=e7066e71dfa3b36daeb4830e3032091e38f0bfc1;hb=f0e5ed7f323db4e47b51da4a23fad8245c173081;hp=bf6100236e6270336651229acbb2056021a68991;hpb=e9ccb79927225d8cd8d022a7c09bfb2fad935b89;p=powerpc.git

diff --git a/mm/memory.c b/mm/memory.c
index bf6100236e..e7066e71df 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -678,10 +678,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				if (pte_dirty(ptent))
 					set_page_dirty(page);
 				if (pte_young(ptent))
-					mark_page_accessed(page);
+					SetPageReferenced(page);
 				file_rss--;
 			}
-			page_remove_rmap(page);
+			page_remove_rmap(page, vma);
 			tlb_remove_page(tlb, page);
 			continue;
 		}
@@ -1091,7 +1091,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
 			if (pages) {
 				pages[i] = page;
-				flush_anon_page(page, start);
+				flush_anon_page(vma, page, start);
 				flush_dcache_page(page);
 			}
 			if (vmas)
@@ -1277,6 +1277,51 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+/**
+ * vm_insert_pfn - insert single pfn into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ *
+ * Similar to vm_insert_page, this allows drivers to insert individual pages
+ * they've allocated into a user vma. Same comments apply.
+ *
+ * This function should only be called from a vm_ops->fault handler, and
+ * in that case the handler should return NULL.
+ */
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+		unsigned long pfn)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int retval;
+	pte_t *pte, entry;
+	spinlock_t *ptl;
+
+	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
+	BUG_ON(is_cow_mapping(vma->vm_flags));
+
+	retval = -ENOMEM;
+	pte = get_locked_pte(mm, addr, &ptl);
+	if (!pte)
+		goto out;
+	retval = -EBUSY;
+	if (!pte_none(*pte))
+		goto out_unlock;
+
+	/* Ok, finally just insert the thing.. */
+	entry = pfn_pte(pfn, vma->vm_page_prot);
+	set_pte_at(mm, addr, pte, entry);
+	update_mmu_cache(vma, addr, entry);
+
+	retval = 0;
+out_unlock:
+	pte_unmap_unlock(pte, ptl);
+
+out:
+	return retval;
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
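
As a rough sketch of how a driver might use the new vm_insert_pfn() helper
(this example is not part of the patch above; the mydrv_* names are purely
hypothetical), an mmap method could hand a single kernel pfn to userspace
like this:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver-private page frame that userspace may map. */
static unsigned long mydrv_pfn;

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	/*
	 * vm_insert_pfn() BUG_ONs unless the vma is a pure pfn mapping
	 * that cannot be COWed, so the driver sets VM_PFNMAP here and
	 * userspace must mmap the file MAP_SHARED (or read-only).
	 */
	vma->vm_flags |= VM_PFNMAP;

	/*
	 * Returns 0 on success, -ENOMEM if the page table could not be
	 * allocated, -EBUSY if a pte is already present at the address.
	 */
	return vm_insert_pfn(vma, vma->vm_start, mydrv_pfn);
}

Returning -EBUSY instead of overwriting an existing pte is what makes the
helper safe to call from a fault path that may race with another thread.
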
@@ -1441,7 +1486,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va)
+static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
 {
 	/*
 	 * If the source page was a PFN mapping, we don't have
@@ -1464,9 +1509,9 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
 		kunmap_atomic(kaddr, KM_USER0);
 		flush_dcache_page(dst);
 		return;
-		
+
 	}
-	copy_user_highpage(dst, src, va);
+	copy_user_highpage(dst, src, va, vma);
 }
 
 /*
@@ -1531,8 +1576,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
 				goto unwritable_page;
 
-			page_cache_release(old_page);
-
 			/*
 			 * Since we dropped the lock we need to revalidate
 			 * the PTE as someone else may have changed it.  If
@@ -1541,6 +1584,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			 */
 			page_table = pte_offset_map_lock(mm, pmd, address,
							 &ptl);
+			page_cache_release(old_page);
 			if (!pte_same(*page_table, orig_pte))
 				goto unlock;
 		}
@@ -1577,7 +1621,7 @@ gotten:
 		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 		if (!new_page)
 			goto oom;
-		cow_user_page(new_page, old_page, address);
+		cow_user_page(new_page, old_page, address, vma);
 	}
 
 	/*
@@ -1586,7 +1630,7 @@ gotten:
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (likely(pte_same(*page_table, orig_pte))) {
 		if (old_page) {
-			page_remove_rmap(old_page);
+			page_remove_rmap(old_page, vma);
 			if (!PageAnon(old_page)) {
 				dec_mm_counter(mm, file_rss);
 				inc_mm_counter(mm, anon_rss);
@@ -1776,9 +1820,7 @@ restart:
 }
 
 /**
- * unmap_mapping_range - unmap the portion of all mmaps
- * in the specified address_space corresponding to the specified
- * page range in the underlying file.
+ * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
  * @mapping: the address space containing mmaps to be unmapped.
  * @holebegin: byte in first page to unmap, relative to the start of
  * the underlying file.  This will be rounded down to a PAGE_SIZE
@@ -2200,7 +2242,7 @@ retry:
 			page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 			if (!page)
 				goto oom;
-			copy_user_highpage(page, new_page, address);
+			copy_user_highpage(page, new_page, address, vma);
 			page_cache_release(new_page);
 			new_page = page;
 			anon = 1;
@@ -2313,10 +2355,12 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
 	BUG_ON(is_cow_mapping(vma->vm_flags));
 
 	pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
-	if (pfn == NOPFN_OOM)
+	if (unlikely(pfn == NOPFN_OOM))
 		return VM_FAULT_OOM;
-	if (pfn == NOPFN_SIGBUS)
+	else if (unlikely(pfn == NOPFN_SIGBUS))
 		return VM_FAULT_SIGBUS;
+	else if (unlikely(pfn == NOPFN_REFAULT))
+		return VM_FAULT_MINOR;
 
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 
@@ -2606,8 +2650,15 @@ static int __init gate_vma_init(void)
 	gate_vma.vm_mm = NULL;
 	gate_vma.vm_start = FIXADDR_USER_START;
 	gate_vma.vm_end = FIXADDR_USER_END;
-	gate_vma.vm_page_prot = PAGE_READONLY;
-	gate_vma.vm_flags = 0;
+	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+	gate_vma.vm_page_prot = __P101;
+	/*
+	 * Make sure the vDSO gets into every core dump.
+	 * Dumping its contents makes post-mortem fully interpretable later
+	 * without matching up the same kernel and hardware config to see
+	 * what PC values meant.
+	 */
+	gate_vma.vm_flags |= VM_ALWAYSDUMP;
 	return 0;
 }
 __initcall(gate_vma_init);
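
The NOPFN_REFAULT addition gives a ->nopfn handler a way to report that the
fault has already been resolved, for instance because the handler installed
the pte itself or lost a race with another thread that did; do_no_pfn() then
simply returns VM_FAULT_MINOR and the faulting instruction is retried. A
minimal sketch of such a handler follows; struct mydev and the mydev_*
helpers are hypothetical, only the NOPFN_* values and vm_insert_pfn() come
from this patch:

#include <linux/mm.h>

struct mydev;				/* hypothetical device type */
extern int mydev_resident(struct mydev *dev);
extern unsigned long mydev_pfn_for(struct mydev *dev, unsigned long off);

static unsigned long mydev_nopfn(struct vm_area_struct *vma,
				 unsigned long address)
{
	struct mydev *dev = vma->vm_private_data;
	unsigned long pfn;

	if (!mydev_resident(dev))
		return NOPFN_SIGBUS;	/* nothing backs this address */

	/* do_no_pfn() has already page-aligned 'address' for us */
	pfn = mydev_pfn_for(dev, address - vma->vm_start);

	/*
	 * Install the translation ourselves instead of handing the pfn
	 * back; -EBUSY only means a concurrent fault beat us to it.
	 */
	if (vm_insert_pfn(vma, address, pfn) == -ENOMEM)
		return NOPFN_OOM;

	/* The pte is in place, nothing more for do_no_pfn() to do. */
	return NOPFN_REFAULT;
}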