X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=mm%2Fmemory.c;h=4b0144b24c123681dcd9e95e715ae56b009355d7;hb=feed9bab7b14b77be8d796bcee95e2343fb82955;hp=9791e4786843f40438a97910728ec580664d1d4e;hpb=f8a9efb52847433c6a2e1598d78e49c42a9aa3c5;p=powerpc.git

diff --git a/mm/memory.c b/mm/memory.c
index 9791e47868..4b0144b24c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -392,6 +392,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
 			return NULL;
 	}
 
+#ifdef CONFIG_DEBUG_VM
 	/*
 	 * Add some anal sanity checks for now. Eventually,
 	 * we should just do "return pfn_to_page(pfn)", but
@@ -402,6 +403,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
 		print_bad_pte(vma, pte, addr);
 		return NULL;
 	}
+#endif
 
 	/*
 	 * NOTE! We still have PageReserved() pages in the page
@@ -1036,7 +1038,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
 		if (is_vm_hugetlb_page(vma)) {
 			i = follow_hugetlb_page(mm, vma, pages, vmas,
-						&start, &len, i);
+						&start, &len, i, write);
 			continue;
 		}
 
@@ -1668,6 +1670,9 @@ gotten:
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	if (dirty_page) {
+		if (vma->vm_file)
+			file_update_time(vma->vm_file);
+
 		/*
 		 * Yes, Virginia, this is actually required to prevent a race
 		 * with clear_page_dirty_for_io() from clearing the page dirty
@@ -2084,9 +2089,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		count_vm_event(PGMAJFAULT);
 	}
 
-	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 	mark_page_accessed(page);
 	lock_page(page);
+	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
 	/*
 	 * Back out if somebody else already faulted in this pte.
@@ -2341,6 +2346,9 @@ out_unlocked:
 	if (anon)
 		page_cache_release(vmf.page);
 	else if (dirty_page) {
+		if (vma->vm_file)
+			file_update_time(vma->vm_file);
+
 		set_page_dirty_balance(dirty_page, page_mkwrite);
 		put_page(dirty_page);
 	}
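
Note (not part of the patch): the file_update_time() hunks above are meant to make a write fault on a shared file mapping refresh the file's timestamps, much like write(2) does. A minimal userspace sketch of the behaviour being targeted follows; the scratch-file path, mapping size, and sleep are arbitrary choices for illustration only.

/* Illustrative test only: dirty a MAP_SHARED page and check st_mtime. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/tmp/mmap-mtime-test";	/* hypothetical scratch file */
	struct stat before, after;
	char *p;
	int fd;

	fd = open(path, O_RDWR | O_CREAT, 0644);
	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	fstat(fd, &before);
	sleep(1);			/* make an mtime change observable */

	p[0] = 'x';			/* dirty the page via a write fault */
	msync(p, 4096, MS_SYNC);	/* force writeback */

	fstat(fd, &after);
	printf("mtime %s\n",
	       after.st_mtime > before.st_mtime ? "updated" : "unchanged");

	munmap(p, 4096);
	close(fd);
	return 0;
}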