X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=mm%2Fmemory.c;h=4b0144b24c123681dcd9e95e715ae56b009355d7;hb=feed9bab7b14b77be8d796bcee95e2343fb82955;hp=4bf0b6d0eb2a675bbd1083099448d54979dff304;hpb=febb187761b02fce7d61b9c897d0e701f672b5ee;p=powerpc.git

diff --git a/mm/memory.c b/mm/memory.c
index 4bf0b6d0eb..4b0144b24c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -392,6 +392,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
 		return NULL;
 	}
 
+#ifdef CONFIG_DEBUG_VM
 	/*
 	 * Add some anal sanity checks for now. Eventually,
 	 * we should just do "return pfn_to_page(pfn)", but
@@ -402,6 +403,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
 		print_bad_pte(vma, pte, addr);
 		return NULL;
 	}
+#endif
 
 	/*
 	 * NOTE! We still have PageReserved() pages in the page
@@ -1668,6 +1670,9 @@ gotten:
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	if (dirty_page) {
+		if (vma->vm_file)
+			file_update_time(vma->vm_file);
+
 		/*
 		 * Yes, Virginia, this is actually required to prevent a race
 		 * with clear_page_dirty_for_io() from clearing the page dirty
@@ -2341,6 +2346,9 @@ out_unlocked:
 	if (anon)
 		page_cache_release(vmf.page);
 	else if (dirty_page) {
+		if (vma->vm_file)
+			file_update_time(vma->vm_file);
+
 		set_page_dirty_balance(dirty_page, page_mkwrite);
 		put_page(dirty_page);
 	}
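
The two file_update_time() hunks make a write fault on a shared, file-backed mapping refresh the backing file's timestamps, not just mark the page dirty; the CONFIG_DEBUG_VM hunks confine the pfn_valid() sanity check in vm_normal_page() to debug builds. As a rough illustration of the user-visible effect of the first change, below is a minimal userspace sketch (not part of the patch; the file name and sizes are arbitrary): it maps a file MAP_SHARED, stores into the mapping, and checks whether st_mtime advanced. With these hunks applied the fault path calls file_update_time(), so the timestamp should move; without them it could stay stale until some other path updated it.

/*
 * Hypothetical demonstration only, not part of the patch: dirty a
 * MAP_SHARED file mapping and see whether st_mtime is refreshed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);
	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	struct stat before, after;
	fstat(fd, &before);

	sleep(1);                    /* make a one-second mtime change observable */
	memcpy(p, "hello", 5);       /* write fault dirties the shared mapping */
	msync(p, 4096, MS_SYNC);     /* write the dirty page back */

	fstat(fd, &after);
	printf("mtime %s\n",
	       after.st_mtime > before.st_mtime ? "updated" : "unchanged");

	munmap(p, 4096);
	close(fd);
	return 0;
}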