[XFS] Ensure fsync does not incorrectly return EIO for pages beyond EOF.
diff --git a/mm/filemap.c b/mm/filemap.c
index c11418d..5d6e4c2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -37,6 +37,10 @@
 #include <asm/uaccess.h>
 #include <asm/mman.h>
 
+static ssize_t
+generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+	loff_t offset, unsigned long nr_segs);
+
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  * though.
@@ -54,16 +58,15 @@
  *
  *  ->i_mmap_lock		(vmtruncate)
  *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
- *      ->swap_list_lock
- *        ->swap_device_lock	(exclusive_swap_page, others)
- *          ->mapping->tree_lock
+ *      ->swap_lock		(exclusive_swap_page, others)
+ *        ->mapping->tree_lock
  *
  *  ->i_sem
  *    ->i_mmap_lock		(truncate->unmap_mapping_range)
  *
  *  ->mmap_sem
  *    ->i_mmap_lock
- *      ->page_table_lock	(various places, mainly in mmap.c)
+ *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
  *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
  *
  *  ->mmap_sem
@@ -83,10 +86,10 @@
  *    ->anon_vma.lock		(vma_adjust)
  *
  *  ->anon_vma.lock
- *    ->page_table_lock	(anon_vma_prepare and various)
+ *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
  *
- *  ->page_table_lock
- *    ->swap_device_lock	(try_to_unmap_one)
+ *  ->page_table_lock or pte_lock
+ *    ->swap_lock		(try_to_unmap_one)
  *    ->private_lock		(try_to_unmap_one)
  *    ->tree_lock		(try_to_unmap_one)
  *    ->zone.lru_lock		(follow_page->mark_page_accessed)
@@ -149,7 +152,7 @@ static int sync_page(void *word)
  * in the ->sync_page() methods make essential use of the
  * page_mapping(), merely passing the page down to the backing
  * device's unplug functions when it's non-NULL, which in turn
- * ignore it for all cases but swap, where only page->private is
+ * ignore it for all cases but swap, where only page_private(page) is
  * of interest. When page_mapping() does go NULL, the entire
  * call stack gracefully ignores the page and returns.
  * -- wli
@@ -302,8 +305,9 @@ EXPORT_SYMBOL(sync_page_range);
  * as it forces O_SYNC writers to different parts of the same file
  * to be serialised right until io completion.
  */
-int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
-			loff_t pos, size_t count)
+static int sync_page_range_nolock(struct inode *inode,
+				  struct address_space *mapping,
+				  loff_t pos, size_t count)
 {
 	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
 	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
@@ -318,7 +322,6 @@ int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
 	ret = wait_on_page_writeback_range(mapping, start, end);
 	return ret;
 }
-EXPORT_SYMBOL(sync_page_range_nolock);
 
 /**
  * filemap_fdatawait - walk the list of under-writeback pages of the given
@@ -374,7 +377,7 @@ int filemap_write_and_wait_range(struct address_space *mapping,
  * This function does not add the page to the LRU.  The caller must do that.
  */
 int add_to_page_cache(struct page *page, struct address_space *mapping,
-		pgoff_t offset, int gfp_mask)
+		pgoff_t offset, gfp_t gfp_mask)
 {
 	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 
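[Note: this hunk and the three that follow switch allocation-flag parameters from plain int to the dedicated gfp_t type. A minimal stand-alone sketch of the idea follows; the names and values are simplified stand-ins for the kernel's real definitions in include/linux/gfp.h, not kernel code. Under sparse (__CHECKER__), gfp_t becomes a distinct __bitwise type, so passing a bare int where a gfp_t is expected gets flagged, while an ordinary gcc build is unaffected.

#include <stdlib.h>

#ifdef __CHECKER__			/* defined when running sparse */
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise gfp_t;	/* distinct type under sparse */

#define GFP_EXAMPLE	((__force gfp_t)0x10u)	/* illustrative value only */

static void *alloc_example(size_t size, gfp_t flags)
{
	(void)flags;			/* a real allocator would honour these */
	return malloc(size);
}

int main(void)
{
	void *p = alloc_example(64, GFP_EXAMPLE);	/* ok */
	/* alloc_example(64, 0x10);  <- sparse would warn: int used as gfp_t */
	free(p);
	return 0;
}

End of note.]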
@@ -398,7 +401,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 EXPORT_SYMBOL(add_to_page_cache);
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				pgoff_t offset, int gfp_mask)
+				pgoff_t offset, gfp_t gfp_mask)
 {
 	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
 	if (ret == 0)
@@ -588,7 +591,7 @@ EXPORT_SYMBOL(find_lock_page);
  * memory exhaustion.
  */
 struct page *find_or_create_page(struct address_space *mapping,
-		unsigned long index, unsigned int gfp_mask)
+		unsigned long index, gfp_t gfp_mask)
 {
 	struct page *page, *cached_page = NULL;
 	int err;
@@ -680,7 +683,7 @@ struct page *
 grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 {
 	struct page *page = find_get_page(mapping, index);
-	unsigned int gfp_mask;
+	gfp_t gfp_mask;
 
 	if (page) {
 		if (!TestSetPageLocked(page))
@@ -1027,8 +1030,8 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 		desc.error = 0;
 		do_generic_file_read(filp,ppos,&desc,file_read_actor);
 		retval += desc.written;
-		if (!retval) {
-			retval = desc.error;
+		if (desc.error) {
+			retval = retval ?: desc.error;
 			break;
 		}
 	}
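[Note: the hunk above changes how an I/O error interacts with a partial multi-segment read. Previously the loop only picked up desc.error when nothing had been read at all, so an error hit after some segments had already transferred could be dropped; now any error stops the loop, and the GNU C "x ?: y" form returns the partial byte count when it is nonzero, else the error. A minimal user-space sketch of that combining rule, with hypothetical values rather than kernel code:

#include <stdio.h>

/* "retval ?: error" is GNU C shorthand for "retval ? retval : error",
 * evaluating retval only once. */
static long read_result(long bytes_so_far, long error)
{
	return bytes_so_far ?: error;
}

int main(void)
{
	printf("%ld\n", read_result(4096, -5));	/* partial read wins: 4096 */
	printf("%ld\n", read_result(0, -5));	/* nothing read: -5 (-EIO) */
	return 0;
}

End of note.]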
@@ -1505,15 +1508,22 @@ repeat:
 		return -EINVAL;
 
 	page = filemap_getpage(file, pgoff, nonblock);
+
+	/* XXX: This is wrong, a filesystem I/O error may have happened. Fix that as
+	 * done in shmem_populate calling shmem_getpage */
 	if (!page && !nonblock)
 		return -ENOMEM;
+
 	if (page) {
 		err = install_page(mm, vma, addr, page, prot);
 		if (err) {
 			page_cache_release(page);
 			return err;
 		}
-	} else {
+	} else if (vma->vm_flags & VM_NONLINEAR) {
+		/* No page was found just because we can't read it in now (being
+		 * here implies nonblock != 0), but the page may exist, so set
+		 * the PTE to fault it in later. */
 		err = install_file_pte(mm, vma, addr, pgoff, prot);
 		if (err)
 			return err;
@@ -1527,6 +1537,7 @@ repeat:
 
 	return 0;
 }
+EXPORT_SYMBOL(filemap_populate);
 
 struct vm_operations_struct generic_file_vm_ops = {
 	.nopage		= filemap_nopage,
@@ -1545,7 +1556,6 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
 	vma->vm_ops = &generic_file_vm_ops;
 	return 0;
 }
-EXPORT_SYMBOL(filemap_populate);
 
 /*
  * This is for filesystems which do not implement ->writepage.
@@ -2002,7 +2012,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 }
 EXPORT_SYMBOL(generic_file_buffered_write);
 
-ssize_t
+static ssize_t
 __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
 				unsigned long nr_segs, loff_t *ppos)
 {
@@ -2102,7 +2112,7 @@ generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
 	return ret;
 }
 
-ssize_t
+static ssize_t
 __generic_file_write_nolock(struct file *file, const struct iovec *iov,
 			    unsigned long nr_segs, loff_t *ppos)
 {
@@ -2223,7 +2233,7 @@ EXPORT_SYMBOL(generic_file_writev);
  * Called under i_sem for writes to S_ISREG files.  Returns -EIO if something
  * went wrong during pagecache shootdown.
  */
-ssize_t
+static ssize_t
 generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	loff_t offset, unsigned long nr_segs)
 {
@@ -2258,4 +2268,3 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 	}
 	return retval;
 }
-EXPORT_SYMBOL_GPL(generic_file_direct_IO);
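[Note: the last two hunks work together with the forward declaration added in the first hunk at line 37: generic_file_direct_IO is only called from within this file, so it can be made static and its GPL export dropped, with the forward declaration letting earlier code in the file keep calling it before its definition. A minimal sketch of that pattern, with hypothetical names:

#include <stdio.h>

static int helper(int x);	/* forward declaration near the top of the file */

/* Earlier code can call helper() before its definition appears. */
static int caller(int x)
{
	return helper(x) + 1;
}

/* Definition lower down; no export needed since all callers are local. */
static int helper(int x)
{
	return x * 2;
}

int main(void)
{
	printf("%d\n", caller(20));	/* prints 41 */
	return 0;
}

End of note.]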