libata: convert legacy PCI host handling to new init model
diff --git a/mm/memory.c b/mm/memory.c
index 9cf3f34..e7066e7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -678,10 +678,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                if (pte_dirty(ptent))
                                        set_page_dirty(page);
                                if (pte_young(ptent))
-                                       mark_page_accessed(page);
+                                       SetPageReferenced(page);
                                file_rss--;
                        }
-                       page_remove_rmap(page);
+                       page_remove_rmap(page, vma);
                        tlb_remove_page(tlb, page);
                        continue;
                }
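
A note on the hunk above: on this pte-teardown path the young bit is now folded into the page flags with SetPageReferenced() rather than mark_page_accessed(), since the latter may also move the page between LRU lists, which is wasted work while the mapping is being destroyed. A minimal sketch of the pattern, with a hypothetical helper name (not code from this file):

#include <linux/mm.h>

/*
 * Fold hardware-maintained pte bits into struct page state before the
 * pte goes away: preserve dirty data, and record the access with the
 * cheap SetPageReferenced() hint instead of the LRU-shuffling
 * mark_page_accessed().
 */
static inline void fold_pte_state(struct page *page, pte_t ptent)
{
	if (pte_dirty(ptent))
		set_page_dirty(page);
	if (pte_young(ptent))
		SetPageReferenced(page);
}
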
@@ -1086,11 +1086,12 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                default:
                                        BUG();
                                }
+                               cond_resched();
                        }
                        if (pages) {
                                pages[i] = page;
 
-                               flush_anon_page(page, start);
+                               flush_anon_page(vma, page, start);
                                flush_dcache_page(page);
                        }
                        if (vmas)
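
The cond_resched() added to the inner loop bounds scheduling latency when a caller pins a large range with get_user_pages(): each iteration may have to fault a page in, so without the yield one call could monopolize the CPU on non-preemptible kernels. The same pattern fits any potentially long kernel loop; a small hypothetical sketch (touch_pages and its per-page work are illustrative only):

#include <linux/sched.h>
#include <linux/highmem.h>

/* Offer to reschedule between iterations of a long-running loop. */
static void touch_pages(struct page **pages, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++) {
		flush_dcache_page(pages[i]);	/* stand-in per-page work */
		cond_resched();
	}
}
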
@@ -1109,23 +1110,29 @@ static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
        pte_t *pte;
        spinlock_t *ptl;
+       int err = 0;
 
        pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
-               return -ENOMEM;
+               return -EAGAIN;
        arch_enter_lazy_mmu_mode();
        do {
                struct page *page = ZERO_PAGE(addr);
                pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
+
+               if (unlikely(!pte_none(*pte))) {
+                       err = -EEXIST;
+                       pte++;
+                       break;
+               }
                page_cache_get(page);
                page_add_file_rmap(page);
                inc_mm_counter(mm, file_rss);
-               BUG_ON(!pte_none(*pte));
                set_pte_at(mm, addr, pte, zero_pte);
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
-       return 0;
+       return err;
 }
 
 static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -1133,16 +1140,18 @@ static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
 {
        pmd_t *pmd;
        unsigned long next;
+       int err;
 
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
-               return -ENOMEM;
+               return -EAGAIN;
        do {
                next = pmd_addr_end(addr, end);
-               if (zeromap_pte_range(mm, pmd, addr, next, prot))
-                       return -ENOMEM;
+               err = zeromap_pte_range(mm, pmd, addr, next, prot);
+               if (err)
+                       break;
        } while (pmd++, addr = next, addr != end);
-       return 0;
+       return err;
 }
 
 static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
@@ -1150,16 +1159,18 @@ static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 {
        pud_t *pud;
        unsigned long next;
+       int err;
 
        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
-               return -ENOMEM;
+               return -EAGAIN;
        do {
                next = pud_addr_end(addr, end);
-               if (zeromap_pmd_range(mm, pud, addr, next, prot))
-                       return -ENOMEM;
+               err = zeromap_pmd_range(mm, pud, addr, next, prot);
+               if (err)
+                       break;
        } while (pud++, addr = next, addr != end);
-       return 0;
+       return err;
 }
 
 int zeromap_page_range(struct vm_area_struct *vma,
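
Taken together, the three zeromap_* hunks rework the walk's error handling: each level now propagates whatever its callee returned instead of flattening everything to -ENOMEM, a failed page-table allocation becomes -EAGAIN, and finding an already-populated pte is reported as -EEXIST rather than hitting a BUG_ON(). A hedged sketch of the kind of caller that can tell these apart, loosely modeled on an mmap of /dev/zero (the function name and the VM_SHARED check are assumptions, not part of this diff):

#include <linux/fs.h>
#include <linux/mm.h>

static int mmap_zero_sketch(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHARED)
		return -EINVAL;	/* assumed: shared mappings handled elsewhere */
	/*
	 * May now fail with -EAGAIN (page-table allocation failed) or
	 * -EEXIST (a racing fault already installed a pte) instead of a
	 * blanket -ENOMEM.
	 */
	return zeromap_page_range(vma, vma->vm_start,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}
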
@@ -1266,6 +1277,51 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+/**
+ * vm_insert_pfn - insert single pfn into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ *
+ * Similar to vm_insert_page, this allows drivers to insert individual pages
+ * they've allocated into a user vma. Same comments apply.
+ *
+ * This function should only be called from a vm_ops->fault handler, and
+ * in that case the handler should return NULL.
+ */
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+               unsigned long pfn)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       int retval;
+       pte_t *pte, entry;
+       spinlock_t *ptl;
+
+       BUG_ON(!(vma->vm_flags & VM_PFNMAP));
+       BUG_ON(is_cow_mapping(vma->vm_flags));
+
+       retval = -ENOMEM;
+       pte = get_locked_pte(mm, addr, &ptl);
+       if (!pte)
+               goto out;
+       retval = -EBUSY;
+       if (!pte_none(*pte))
+               goto out_unlock;
+
+       /* Ok, finally just insert the thing.. */
+       entry = pfn_pte(pfn, vma->vm_page_prot);
+       set_pte_at(mm, addr, pte, entry);
+       update_mmu_cache(vma, addr, entry);
+
+       retval = 0;
+out_unlock:
+       pte_unmap_unlock(pte, ptl);
+
+out:
+       return retval;
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
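
As the kerneldoc above says, vm_insert_pfn() is meant to be driven from a driver's fault-time hook. A hypothetical ->nopfn handler of this era could pair it with the NOPFN_REFAULT value introduced further down in this diff; every mydrv_ name below is illustrative, not a real driver:

#include <linux/mm.h>

static unsigned long mydrv_base_pfn;	/* assumed driver state */

static unsigned long mydrv_nopfn(struct vm_area_struct *vma,
				 unsigned long address)
{
	unsigned long pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	int err = vm_insert_pfn(vma, address, mydrv_base_pfn + pgoff);

	if (err == -EBUSY)
		return NOPFN_REFAULT;	/* lost a race: pte already there, retry */
	if (err)
		return NOPFN_OOM;	/* page-table allocation failed */
	return NOPFN_REFAULT;		/* installed; have the core retry the access */
}
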
@@ -1430,7 +1486,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
        return pte;
 }
 
-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va)
+static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
 {
        /*
         * If the source page was a PFN mapping, we don't have
@@ -1451,10 +1507,11 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
                if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
                        memset(kaddr, 0, PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
+               flush_dcache_page(dst);
                return;
-               
+
        }
-       copy_user_highpage(dst, src, va);
+       copy_user_highpage(dst, src, va, vma);
 }
 
 /*
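
The vma argument threaded through cow_user_page() exists for the benefit of copy_user_highpage(): architectures with virtually-indexed caches need the vma to handle aliasing when copying into a user-visible page, while the generic fallback simply ignores it. Roughly what the generic version of this era looks like (condensed from include/linux/highmem.h; a sketch, not an authoritative copy):

#include <linux/highmem.h>

static inline void copy_user_highpage_sketch(struct page *to,
					     struct page *from,
					     unsigned long vaddr,
					     struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	/* vma is unused here; cache-aliasing architectures override this. */
	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}
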
@@ -1519,8 +1576,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
                                goto unwritable_page;
 
-                       page_cache_release(old_page);
-
                        /*
                         * Since we dropped the lock we need to revalidate
                         * the PTE as someone else may have changed it.  If
@@ -1529,6 +1584,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         */
                        page_table = pte_offset_map_lock(mm, pmd, address,
                                                         &ptl);
+                       page_cache_release(old_page);
                        if (!pte_same(*page_table, orig_pte))
                                goto unlock;
                }
@@ -1565,7 +1621,7 @@ gotten:
                new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
                if (!new_page)
                        goto oom;
-               cow_user_page(new_page, old_page, address);
+               cow_user_page(new_page, old_page, address, vma);
        }
 
        /*
@@ -1574,7 +1630,7 @@ gotten:
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (likely(pte_same(*page_table, orig_pte))) {
                if (old_page) {
-                       page_remove_rmap(old_page);
+                       page_remove_rmap(old_page, vma);
                        if (!PageAnon(old_page)) {
                                dec_mm_counter(mm, file_rss);
                                inc_mm_counter(mm, anon_rss);
@@ -1764,9 +1820,7 @@ restart:
 }
 
 /**
- * unmap_mapping_range - unmap the portion of all mmaps
- * in the specified address_space corresponding to the specified
- * page range in the underlying file.
+ * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
  * @mapping: the address space containing mmaps to be unmapped.
  * @holebegin: byte in first page to unmap, relative to the start of
  * the underlying file.  This will be rounded down to a PAGE_SIZE
@@ -1900,7 +1954,6 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 
        return 0;
 }
-EXPORT_UNUSED_SYMBOL(vmtruncate_range);  /*  June 2006  */
 
 /**
  * swapin_readahead - swap in pages in hope we need them soon
@@ -1989,6 +2042,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        delayacct_set_flag(DELAYACCT_PF_SWAPIN);
        page = lookup_swap_cache(entry);
        if (!page) {
+               grab_swap_token(); /* Contend for token _before_ read-in */
                swapin_readahead(entry, address, vma);
                page = read_swap_cache_async(entry, vma, address);
                if (!page) {
@@ -2006,7 +2060,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                /* Had to read the page from swap area: Major fault */
                ret = VM_FAULT_MAJOR;
                count_vm_event(PGMAJFAULT);
-               grab_swap_token();
        }
 
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -2169,11 +2222,13 @@ retry:
         * after the next truncate_count read.
         */
 
-       /* no page was available -- either SIGBUS or OOM */
-       if (new_page == NOPAGE_SIGBUS)
+       /* no page was available -- either SIGBUS, OOM or REFAULT */
+       if (unlikely(new_page == NOPAGE_SIGBUS))
                return VM_FAULT_SIGBUS;
-       if (new_page == NOPAGE_OOM)
+       else if (unlikely(new_page == NOPAGE_OOM))
                return VM_FAULT_OOM;
+       else if (unlikely(new_page == NOPAGE_REFAULT))
+               return VM_FAULT_MINOR;
 
        /*
         * Should we do an early C-O-W break?
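
NOPAGE_REFAULT gives a ->nopage implementation a third option besides returning a page or an error: ask the core to simply retry the faulting access, e.g. when the handler installed the mapping itself or hit a transient state. A hypothetical handler sketch (mydrv_lookup_page is an assumed helper returning a referenced page):

#include <linux/mm.h>

static struct page *mydrv_lookup_page(struct vm_area_struct *vma,
				      unsigned long address);	/* assumed */

static struct page *mydrv_nopage(struct vm_area_struct *vma,
				 unsigned long address, int *type)
{
	struct page *page = mydrv_lookup_page(vma, address);

	if (!page)
		return NOPAGE_REFAULT;	/* transient: retry the fault */
	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}
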
@@ -2187,7 +2242,7 @@ retry:
                        page = alloc_page_vma(GFP_HIGHUSER, vma, address);
                        if (!page)
                                goto oom;
-                       copy_user_highpage(page, new_page, address);
+                       copy_user_highpage(page, new_page, address, vma);
                        page_cache_release(new_page);
                        new_page = page;
                        anon = 1;
@@ -2300,10 +2355,12 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
        BUG_ON(is_cow_mapping(vma->vm_flags));
 
        pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
-       if (pfn == NOPFN_OOM)
+       if (unlikely(pfn == NOPFN_OOM))
                return VM_FAULT_OOM;
-       if (pfn == NOPFN_SIGBUS)
+       else if (unlikely(pfn == NOPFN_SIGBUS))
                return VM_FAULT_SIGBUS;
+       else if (unlikely(pfn == NOPFN_REFAULT))
+               return VM_FAULT_MINOR;
 
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 
@@ -2593,8 +2650,15 @@ static int __init gate_vma_init(void)
        gate_vma.vm_mm = NULL;
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
-       gate_vma.vm_page_prot = PAGE_READONLY;
-       gate_vma.vm_flags = 0;
+       gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+       gate_vma.vm_page_prot = __P101;
+       /*
+        * Make sure the vDSO gets into every core dump.
+        * Dumping its contents makes post-mortem fully interpretable later
+        * without matching up the same kernel and hardware config to see
+        * what PC values meant.
+        */
+       gate_vma.vm_flags |= VM_ALWAYSDUMP;
        return 0;
 }
 __initcall(gate_vma_init);
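
VM_ALWAYSDUMP is consumed by the ELF core-dump code, where it short-circuits every other should-we-dump-this-vma heuristic; that is what guarantees the gate/vDSO pages land in the core file as the comment above intends. A simplified sketch of that decision, loosely condensed from fs/binfmt_elf.c of the same era (details assumed):

#include <linux/mm.h>

static int maydump_sketch(struct vm_area_struct *vma)
{
	/* An explicit request, such as the gate vma above, always wins. */
	if (vma->vm_flags & VM_ALWAYSDUMP)
		return 1;
	/* Never dump I/O mappings or other special regions. */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;
	/* Otherwise dump only memory the process may have written to. */
	return vma->anon_vma != NULL;
}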