X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=mm%2Ftruncate.c;h=0f4b6d18ab0ed663360e0fba11f46f23e8c6b5b3;hb=06abdfb47ee745a4d79721de24260815ec6bca2b;hp=6c79ca4a1ca7cfe9226e46f651e752db78ef88c6;hpb=5b94d541806da24c8dbbff629486d65ce71dec46;p=powerpc.git

diff --git a/mm/truncate.c b/mm/truncate.c
index 6c79ca4a1c..0f4b6d18ab 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -51,15 +51,22 @@ static inline void truncate_partial_page(struct page *page, unsigned partial)
 		do_invalidatepage(page, partial);
 }
 
+/*
+ * This cancels just the dirty bit on the kernel page itself, it
+ * does NOT actually remove dirty bits on any mmap's that may be
+ * around. It also leaves the page tagged dirty, so any sync
+ * activity will still find it on the dirty lists, and in particular,
+ * clear_page_dirty_for_io() will still look at the dirty bits in
+ * the VM.
+ *
+ * Doing this should *normally* only ever be done when a page
+ * is truncated, and is not actually mapped anywhere at all. However,
+ * fs/buffer.c does this when it notices that somebody has cleaned
+ * out all the buffers on a page without actually doing it through
+ * the VM. Can you say "ext3 is horribly ugly"? Tought you could.
+ */
 void cancel_dirty_page(struct page *page, unsigned int account_size)
 {
-	/* If we're cancelling the page, it had better not be mapped any more */
-	if (page_mapped(page)) {
-		static unsigned int warncount;
-
-		WARN_ON(++warncount < 5);
-	}
-
 	if (TestClearPageDirty(page)) {
 		struct address_space *mapping = page->mapping;
 		if (mapping && mapping_cap_account_dirty(mapping)) {
@@ -78,7 +85,7 @@ EXPORT_SYMBOL(cancel_dirty_page);
  *
  * We need to bale out if page->mapping is no longer equal to the original
  * mapping. This happens a) when the VM reclaimed the page while we waited on
- * its lock, b) when a concurrent invalidate_inode_pages got there first and
+ * its lock, b) when a concurrent invalidate_mapping_pages got there first and
  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
  */
 static void
@@ -99,7 +106,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 }
 
 /*
- * This is for invalidate_inode_pages(). That function can be called at
+ * This is for invalidate_mapping_pages(). That function can be called at
  * any time, and is not supposed to throw away dirty pages. But pages can
  * be marked dirty at any time too, so use remove_mapping which safely
  * discards clean, unused pages.
@@ -303,12 +310,7 @@ unlock:
 	}
 	return ret;
 }
-
-unsigned long invalidate_inode_pages(struct address_space *mapping)
-{
-	return invalidate_mapping_pages(mapping, 0, ~0UL);
-}
-EXPORT_SYMBOL(invalidate_inode_pages);
+EXPORT_SYMBOL(invalidate_mapping_pages);
 
 /*
  * This is like invalidate_complete_page(), except it ignores the page's
@@ -373,10 +375,10 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 
 	pagevec_init(&pvec, 0);
 	next = start;
-	while (next <= end && !ret && !wrapped &&
+	while (next <= end && !wrapped &&
 		pagevec_lookup(&pvec, mapping, next,
 			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
-		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
+		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 			pgoff_t page_index;
 
@@ -422,7 +424,6 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		pagevec_release(&pvec);
 		cond_resched();
 	}
-	WARN_ON_ONCE(ret);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
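
The new comment above cancel_dirty_page() says that fs/buffer.c cancels a page's dirty state once every buffer on the page has been cleaned by the filesystem itself rather than through the VM. The sketch below is only an illustration of that calling pattern, not the actual fs/buffer.c hunk: the helper name is hypothetical and the buffer-walking logic that would decide the page is clean is omitted.

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Hypothetical helper: once every buffer on the page is known to be
 * clean, drop the page's own dirty bit and the dirty accounting.
 * As the new comment in mm/truncate.c warns, this does not clear
 * dirty bits in any mmaps, and the page stays tagged dirty, so sync
 * activity can still find it on the dirty lists.
 */
static void example_cancel_page_dirty(struct page *page)
{
	cancel_dirty_page(page, PAGE_CACHE_SIZE);
}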
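
With the unranged invalidate_inode_pages() wrapper deleted, callers use invalidate_mapping_pages() directly; the removed wrapper body shows that passing the range 0 to ~0UL reproduces the old whole-file behaviour. The sketch below, with a hypothetical function and inode, shows what such a converted call site might look like.

#include <linux/fs.h>

/*
 * Hypothetical call site: where code used to call
 * invalidate_inode_pages(inode->i_mapping), it now spells out the
 * full range itself, matching the body of the removed wrapper.
 */
static unsigned long example_drop_clean_pagecache(struct inode *inode)
{
	/* 0 .. ~0UL covers the whole file, as the old wrapper did */
	return invalidate_mapping_pages(inode->i_mapping, 0, ~0UL);
}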