X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=fs%2Fnfs%2Fdirect.c;h=784bbb54e6c13a42c8c50cf50b6ee830826c6c20;hb=3f442547b76bf9fb70d7aecc41cf1980459253c9;hp=76ca1cbc38f96e418f80ff69095480cef3b7b36c;hpb=5eea7ee2075b245d505285bb422e2fa8d686e5c8;p=powerpc.git

diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 76ca1cbc38..784bbb54e6 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -307,9 +307,7 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
 		data->task.tk_cookie = (unsigned long) inode;
 
-		lock_kernel();
 		rpc_execute(&data->task);
-		unlock_kernel();
 
 		dfprintk(VFS, "NFS: %5u initiated direct read call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
 				data->task.tk_pid,
@@ -475,9 +473,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
 
-	lock_kernel();
 	rpc_execute(&data->task);
-	unlock_kernel();
 }
 
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
@@ -497,6 +493,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
 			if (dreq->commit_data != NULL)
 				nfs_commit_free(dreq->commit_data);
 			nfs_direct_free_writedata(dreq);
+			nfs_zap_mapping(inode, inode->i_mapping);
 			nfs_direct_complete(dreq);
 	}
 }
@@ -517,6 +514,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
 {
 	nfs_end_data_update(inode);
 	nfs_direct_free_writedata(dreq);
+	nfs_zap_mapping(inode, inode->i_mapping);
 	nfs_direct_complete(dreq);
 }
 #endif
@@ -532,10 +530,12 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 
 	spin_lock(&dreq->lock);
 
-	if (likely(status >= 0))
-		dreq->count += data->res.count;
-	else
-		dreq->error = task->tk_status;
+	if (unlikely(status < 0)) {
+		dreq->error = status;
+		goto out_unlock;
+	}
+
+	dreq->count += data->res.count;
 
 	if (data->res.verf->committed != NFS_FILE_SYNC) {
 		switch (dreq->flags) {
@@ -550,7 +550,7 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 			}
 		}
 	}
-
+out_unlock:
 	spin_unlock(&dreq->lock);
 }
@@ -637,9 +637,7 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
 		data->task.tk_priority = RPC_PRIORITY_NORMAL;
 		data->task.tk_cookie = (unsigned long) inode;
 
-		lock_kernel();
 		rpc_execute(&data->task);
-		unlock_kernel();
 
 		dfprintk(VFS, "NFS: %5u initiated direct write call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
 				data->task.tk_pid,
@@ -707,8 +705,8 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 /**
  * nfs_file_direct_read - file direct read operation for NFS files
  * @iocb: target I/O control block
- * @buf: user's buffer into which to read data
- * @count: number of bytes to read
+ * @iov: vector of user buffers into which to read data
+ * @nr_segs: size of iov vector
  * @pos: byte offset in file where reading starts
  *
  * We use this function for direct reads instead of calling
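The hunks above make three independent changes: rpc_execute() is no longer bracketed by lock_kernel()/unlock_kernel(), a completed direct write now calls nfs_zap_mapping() so the page cache is invalidated lazily (the direct invalidate_inode_pages2() call is dropped further down in this patch), and nfs_direct_write_result() records an RPC error under the lock and jumps to a shared out_unlock label instead of folding success and failure into one if/else. The snippet below is a minimal userspace analogue of that last pattern only; demo_req and demo_result are hypothetical names, not kernel API.

#include <pthread.h>
#include <stdio.h>

/* Illustrative only: demo_req stands in for struct nfs_direct_req. */
struct demo_req {
	pthread_mutex_t lock;
	long count;	/* bytes completed so far */
	int error;	/* first error seen, 0 if none */
};

/* Mirror of the reworked result handler: bail out through one unlock
 * label on error, accumulate the byte count only on success. */
static void demo_result(struct demo_req *dreq, long status, long res_count)
{
	pthread_mutex_lock(&dreq->lock);

	if (status < 0) {	/* the kernel wraps this test in unlikely() */
		dreq->error = (int)status;
		goto out_unlock;
	}

	dreq->count += res_count;

out_unlock:
	pthread_mutex_unlock(&dreq->lock);
}

int main(void)
{
	struct demo_req dreq = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	demo_result(&dreq, 0, 4096);	/* success: count advances */
	demo_result(&dreq, -5, 0);	/* -EIO: error recorded, count untouched */
	printf("count=%ld error=%d\n", dreq.count, dreq.error);
	return 0;
}

Keeping a single unlock exit point means any future early return added inside the locked region cannot leak the lock, which is the usual motivation for this goto style in kernel code.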
@@ -725,17 +723,24 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
  * client must read the updated atime from the server back into its
  * cache.
  */
-ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
+ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
+				unsigned long nr_segs, loff_t pos)
 {
 	ssize_t retval = -EINVAL;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
+	/* XXX: temporary */
+	const char __user *buf = iov[0].iov_base;
+	size_t count = iov[0].iov_len;
 
 	dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
 		file->f_dentry->d_parent->d_name.name,
 		file->f_dentry->d_name.name,
 		(unsigned long) count, (long long) pos);
 
+	if (nr_segs != 1)
+		return -EINVAL;
+
 	if (count < 0)
 		goto out;
 	retval = -EFAULT;
@@ -760,8 +765,8 @@ out:
 /**
  * nfs_file_direct_write - file direct write operation for NFS files
  * @iocb: target I/O control block
- * @buf: user's buffer from which to write data
- * @count: number of bytes to write
+ * @iov: vector of user buffers from which to write data
+ * @nr_segs: size of iov vector
  * @pos: byte offset in file where writing starts
  *
  * We use this function for direct writes instead of calling
@@ -782,17 +787,24 @@ out:
  * Note that O_APPEND is not supported for NFS direct writes, as there
  * is no atomic O_APPEND write facility in the NFS protocol.
  */
-ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
+ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
+				unsigned long nr_segs, loff_t pos)
 {
 	ssize_t retval;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
+	/* XXX: temporary */
+	const char __user *buf = iov[0].iov_base;
+	size_t count = iov[0].iov_len;
 
 	dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
 		file->f_dentry->d_parent->d_name.name,
 		file->f_dentry->d_name.name,
 		(unsigned long) count, (long long) pos);
 
+	if (nr_segs != 1)
+		return -EINVAL;
+
 	retval = generic_write_checks(file, &pos, &count, 0);
 	if (retval)
 		goto out;
@@ -814,17 +826,6 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t
 	retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);
 
-	/*
-	 * XXX: nfs_end_data_update() already ensures this file's
-	 *      cached data is subsequently invalidated. Do we really
-	 *      need to call invalidate_inode_pages2() again here?
-	 *
-	 *      For aio writes, this invalidation will almost certainly
-	 *      occur before the writes complete. Kind of racey.
-	 */
-	if (mapping->nrpages)
-		invalidate_inode_pages2(mapping);
-
 	if (retval > 0)
 		iocb->ki_pos = pos + retval;
@@ -855,6 +856,5 @@ int __init nfs_init_directcache(void)
  */
 void nfs_destroy_directcache(void)
 {
-	if (kmem_cache_destroy(nfs_direct_cachep))
-		printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
+	kmem_cache_destroy(nfs_direct_cachep);
 }
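With the prototype change above, callers hand nfs_file_direct_read() and nfs_file_direct_write() an iovec array and a segment count instead of a flat (buf, count) pair, although the "XXX: temporary" shim still uses only iov[0] and rejects nr_segs != 1. The userspace sketch below only illustrates that calling convention; direct_read_demo is a made-up stand-in, not a kernel or libc function.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Hypothetical stand-in for the new entry point: takes an iovec array
 * plus nr_segs, but honours only a single segment, as the shim does. */
static ssize_t direct_read_demo(const struct iovec *iov, unsigned long nr_segs,
				off_t pos)
{
	if (nr_segs != 1)
		return -EINVAL;

	char *buf = iov->iov_base;
	size_t count = iov->iov_len;

	/* Stand-in for the actual read: just fill the buffer. */
	memset(buf, 'x', count);
	(void)pos;
	return (ssize_t)count;
}

int main(void)
{
	char buffer[16];
	struct iovec iov = { .iov_base = buffer, .iov_len = sizeof(buffer) };

	/* Existing (buf, count) callers wrap their buffer in one segment. */
	printf("nr_segs=1 -> %zd bytes\n", direct_read_demo(&iov, 1, 0));

	/* More than one segment is still rejected at this stage. */
	struct iovec two[2] = { iov, iov };
	printf("nr_segs=2 -> %zd (-EINVAL)\n", direct_read_demo(two, 2, 0));
	return 0;
}

Wrapping an existing buffer in a single-segment iovec keeps current callers working while the interface is already shaped for true vectored direct I/O later.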