NFS: alloc nfs_read/write_data as direct I/O is scheduled
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001  Initial implementation for 2.4  --cel
 * 08 Jul 2002  Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003  Port to 2.5 APIs  --cel
 * 31 Mar 2004  Handle direct I/O without VFS support  --cel
 * 15 Sep 2004  Parallel async reads  --cel
 * 04 May 2005  support O_DIRECT with aio  --cel
 *
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#include "iostat.h"

#define NFSDBG_FACILITY         NFSDBG_VFS

static kmem_cache_t *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
        struct kref             kref;           /* release manager */

        /* I/O parameters */
        struct nfs_open_context *ctx;           /* file open context info */
        struct kiocb *          iocb;           /* controlling i/o request */
        struct inode *          inode;          /* target file of i/o */

        /* completion state */
        atomic_t                io_count;       /* i/os we're waiting for */
        spinlock_t              lock;           /* protect completion state */
        ssize_t                 count,          /* bytes actually processed */
                                error;          /* any reported error */
        struct completion       completion;     /* wait for i/o completion */

        /* commit state */
        struct list_head        rewrite_list;   /* saved nfs_write_data structs */
        struct nfs_write_data * commit_data;    /* special write_data for commits */
        int                     flags;
#define NFS_ODIRECT_DO_COMMIT           (1)     /* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES      (2)     /* write verification failed */
        struct nfs_writeverf    verf;           /* unstable write verifier */
};
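
/*
 * Unstable direct writes are driven by a small state machine kept in
 * dreq->flags (see nfs_direct_write_result() and
 * nfs_direct_write_complete() below):
 *
 *   0                           every WRITE reply so far was FILE_SYNC;
 *                               nothing more to do at completion time
 *   NFS_ODIRECT_DO_COMMIT       at least one reply was UNSTABLE; its
 *                               verifier is saved in dreq->verf, and a
 *                               COMMIT must be sent before completing
 *   NFS_ODIRECT_RESCHED_WRITES  a verifier mismatch or failed COMMIT
 *                               suggests the server rebooted, so every
 *                               WRITE saved on rewrite_list is resent
 *                               with FLUSH_STABLE
 */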

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static const struct rpc_call_ops nfs_write_direct_ops;

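/*
 * io_count counts the RPCs still outstanding, plus one extra reference
 * held by the scheduling routine itself.  The scheduler takes that
 * reference before dispatching any requests and drops it only when it
 * has finished, so the completion path cannot run while requests are
 * still being set up.
 */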
static inline void get_dreq(struct nfs_direct_req *dreq)
{
        atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
        return atomic_dec_and_test(&dreq->io_count);
}

/*
 * "size" is never larger than rsize or wsize.
 */
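/*
 * For illustration: with 4KB pages, an 8192-byte buffer starting at
 * user address 0x1003 touches pages 1, 2, and 3 of the address space,
 * so
 *
 *      page_count = ((0x1003 + 8192 + 4095) >> 12) - (0x1003 >> 12)
 *                 = 4 - 1 = 3
 */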
static inline int nfs_direct_count_pages(unsigned long user_addr, size_t size)
{
        int page_count;

        page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        page_count -= user_addr >> PAGE_SHIFT;
        BUG_ON(page_count < 0);

        return page_count;
}

static inline unsigned int nfs_max_pages(unsigned int size)
{
        return (size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
        dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
                        iocb->ki_filp->f_dentry->d_name.name,
                        (long long) pos, nr_segs);

        return -EINVAL;
}

static void nfs_direct_dirty_pages(struct page **pages, int npages)
{
        int i;
        for (i = 0; i < npages; i++) {
                struct page *page = pages[i];
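                /*
                 * Presumably compound pages (e.g. hugetlb) are skipped
                 * here because set_page_dirty_lock() cannot safely be
                 * used on their tail pages.
                 */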
                if (!PageCompound(page))
                        set_page_dirty_lock(page);
        }
}

static void nfs_direct_release_pages(struct page **pages, int npages)
{
        int i;
        for (i = 0; i < npages; i++)
                page_cache_release(pages[i]);
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
        struct nfs_direct_req *dreq;

        dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
        if (!dreq)
                return NULL;

        kref_init(&dreq->kref);
        kref_get(&dreq->kref);
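        /*
         * The dreq now holds two references: nfs_direct_wait() puts
         * one, and nfs_direct_complete() puts the other.
         */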
        init_completion(&dreq->completion);
        INIT_LIST_HEAD(&dreq->rewrite_list);
        dreq->iocb = NULL;
        dreq->ctx = NULL;
        spin_lock_init(&dreq->lock);
        atomic_set(&dreq->io_count, 0);
        dreq->count = 0;
        dreq->error = 0;
        dreq->flags = 0;

        return dreq;
}

static void nfs_direct_req_release(struct kref *kref)
{
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

        if (dreq->ctx != NULL)
                put_nfs_open_context(dreq->ctx);
        kmem_cache_free(nfs_direct_cachep, dreq);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
        ssize_t result = -EIOCBQUEUED;

        /* Async requests don't wait here */
        if (dreq->iocb)
                goto out;

        result = wait_for_completion_interruptible(&dreq->completion);

        if (!result)
                result = dreq->error;
        if (!result)
                result = dreq->count;

out:
        kref_put(&dreq->kref, nfs_direct_req_release);
        return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
        if (dreq->iocb) {
                long res = (long) dreq->error;
                if (!res)
                        res = (long) dreq->count;
                aio_complete(dreq->iocb, res, 0);
        }
        complete_all(&dreq->completion);

        kref_put(&dreq->kref, nfs_direct_req_release);
}

/*
 * We must hold a reference to all the pages in this direct read request
 * until the RPCs complete.  This could be long *after* we are woken up in
 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
 */
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
        struct nfs_read_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

        if (nfs_readpage_result(task, data) != 0)
                return;

        nfs_direct_dirty_pages(data->pagevec, data->npages);
        nfs_direct_release_pages(data->pagevec, data->npages);

        spin_lock(&dreq->lock);

        if (likely(task->tk_status >= 0))
                dreq->count += data->res.count;
        else
                dreq->error = task->tk_status;

        spin_unlock(&dreq->lock);

        if (put_dreq(dreq))
                nfs_direct_complete(dreq);
}

static const struct rpc_call_ops nfs_read_direct_ops = {
        .rpc_call_done = nfs_direct_read_result,
        .rpc_release = nfs_readdata_release,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
{
        struct nfs_open_context *ctx = dreq->ctx;
        struct inode *inode = ctx->dentry->d_inode;
        size_t rsize = NFS_SERVER(inode)->rsize;
        unsigned int rpages = nfs_max_pages(rsize);
        unsigned int pgbase;
        int result;
        ssize_t started = 0;

        get_dreq(dreq);

        pgbase = user_addr & ~PAGE_MASK;
        do {
                struct nfs_read_data *data;
                size_t bytes;

                result = -ENOMEM;
                data = nfs_readdata_alloc(rpages);
                if (unlikely(!data))
                        break;

                bytes = rsize;
                if (count < rsize)
                        bytes = count;

                data->npages = nfs_direct_count_pages(user_addr, bytes);
                down_read(&current->mm->mmap_sem);
                result = get_user_pages(current, current->mm, user_addr,
                                        data->npages, 1, 0, data->pagevec, NULL);
                up_read(&current->mm->mmap_sem);
                if (unlikely(result < data->npages)) {
                        if (result > 0)
                                nfs_direct_release_pages(data->pagevec, result);
                        nfs_readdata_release(data);
                        break;
                }

                get_dreq(dreq);

                data->req = (struct nfs_page *) dreq;
                data->inode = inode;
                data->cred = ctx->cred;
                data->args.fh = NFS_FH(inode);
                data->args.context = ctx;
                data->args.offset = pos;
                data->args.pgbase = pgbase;
                data->args.pages = data->pagevec;
                data->args.count = bytes;
                data->res.fattr = &data->fattr;
                data->res.eof = 0;
                data->res.count = bytes;

                rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
                                &nfs_read_direct_ops, data);
                NFS_PROTO(inode)->read_setup(data);

                data->task.tk_cookie = (unsigned long) inode;

                lock_kernel();
                rpc_execute(&data->task);
                unlock_kernel();

                dfprintk(VFS, "NFS: %5u initiated direct read call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
                                data->task.tk_pid,
                                inode->i_sb->s_id,
                                (long long)NFS_FILEID(inode),
                                bytes,
                                (unsigned long long)data->args.offset);

                started += bytes;
                user_addr += bytes;
                pos += bytes;
                pgbase += bytes;
                pgbase &= ~PAGE_MASK;

                count -= bytes;
        } while (count != 0);

        if (put_dreq(dreq))
                nfs_direct_complete(dreq);

        if (started)
                return 0;
        return result < 0 ? (ssize_t) result : -EFAULT;
}

static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
{
        ssize_t result = 0;
        sigset_t oldset;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct rpc_clnt *clnt = NFS_CLIENT(inode);
        struct nfs_direct_req *dreq;

        dreq = nfs_direct_req_alloc();
        if (!dreq)
                return -ENOMEM;

        dreq->inode = inode;
        dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
        rpc_clnt_sigmask(clnt, &oldset);
        result = nfs_direct_read_schedule(dreq, user_addr, count, pos);
        if (!result)
                result = nfs_direct_wait(dreq);
        rpc_clnt_sigunmask(clnt, &oldset);

        return result;
}

static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
{
        while (!list_empty(&dreq->rewrite_list)) {
                struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
                list_del(&data->pages);
                nfs_direct_release_pages(data->pagevec, data->npages);
                nfs_writedata_release(data);
        }
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
        struct inode *inode = dreq->inode;
        struct list_head *p;
        struct nfs_write_data *data;

        dreq->count = 0;
        get_dreq(dreq);

        list_for_each(p, &dreq->rewrite_list) {
                data = list_entry(p, struct nfs_write_data, pages);

                get_dreq(dreq);

                /*
                 * Reset data->res.
                 */
                nfs_fattr_init(&data->fattr);
                data->res.count = data->args.count;
                memset(&data->verf, 0, sizeof(data->verf));

                /*
                 * Reuse data->task; data->args should not have changed
                 * since the original request was sent.
                 */
                rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
                                &nfs_write_direct_ops, data);
                NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);

                data->task.tk_priority = RPC_PRIORITY_NORMAL;
                data->task.tk_cookie = (unsigned long) inode;

                /*
                 * We're called via an RPC callback, so BKL is already held.
                 */
                rpc_execute(&data->task);

                dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
                                data->task.tk_pid,
                                inode->i_sb->s_id,
                                (long long)NFS_FILEID(inode),
                                data->args.count,
                                (unsigned long long)data->args.offset);
        }

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, inode);
}

static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

        /* Call the NFS version-specific code */
        if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
                return;
        if (unlikely(task->tk_status < 0)) {
                dreq->error = task->tk_status;
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        }
        if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
                dprintk("NFS: %5u commit verify failed\n", task->tk_pid);
                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
        }

        dprintk("NFS: %5u commit returned %d\n", task->tk_pid, task->tk_status);
        nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_commit_direct_ops = {
        .rpc_call_done = nfs_direct_commit_result,
        .rpc_release = nfs_commit_release,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
        struct nfs_write_data *data = dreq->commit_data;

        data->inode = dreq->inode;
        data->cred = dreq->ctx->cred;

        data->args.fh = NFS_FH(data->inode);
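        /*
         * Offset zero with a count of zero asks the server to commit
         * all of the file's uncommitted data (RFC 1813, COMMIT).
         */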
        data->args.offset = 0;
        data->args.count = 0;
        data->res.count = 0;
        data->res.fattr = &data->fattr;
        data->res.verf = &data->verf;

        rpc_init_task(&data->task, NFS_CLIENT(dreq->inode), RPC_TASK_ASYNC,
                                &nfs_commit_direct_ops, data);
        NFS_PROTO(data->inode)->commit_setup(data, 0);

        data->task.tk_priority = RPC_PRIORITY_NORMAL;
        data->task.tk_cookie = (unsigned long)data->inode;
        /* Note: task.tk_ops->rpc_release will free dreq->commit_data */
        dreq->commit_data = NULL;

        dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

        lock_kernel();
        rpc_execute(&data->task);
        unlock_kernel();
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
        int flags = dreq->flags;

        dreq->flags = 0;
        switch (flags) {
                case NFS_ODIRECT_DO_COMMIT:
                        nfs_direct_commit_schedule(dreq);
                        break;
                case NFS_ODIRECT_RESCHED_WRITES:
                        nfs_direct_write_reschedule(dreq);
                        break;
                default:
                        nfs_end_data_update(inode);
                        if (dreq->commit_data != NULL)
                                nfs_commit_free(dreq->commit_data);
                        nfs_direct_free_writedata(dreq);
                        nfs_direct_complete(dreq);
        }
}

static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
        dreq->commit_data = nfs_commit_alloc(0);
        if (dreq->commit_data != NULL)
                dreq->commit_data->req = (struct nfs_page *) dreq;
}
#else
static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
        dreq->commit_data = NULL;
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
        nfs_end_data_update(inode);
        nfs_direct_free_writedata(dreq);
        nfs_direct_complete(dreq);
}
#endif

static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
        int status = task->tk_status;

        if (nfs_writeback_done(task, data) != 0)
                return;

        spin_lock(&dreq->lock);

        if (likely(status >= 0))
                dreq->count += data->res.count;
        else
                dreq->error = task->tk_status;

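        /*
         * An UNSTABLE reply means the data may still sit in the
         * server's cache; remember its verifier so a later COMMIT can
         * detect a server reboot.  If two unstable replies carry
         * different verifiers, the server already rebooted mid-flight,
         * and every write must be resent to stable storage.
         */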
        if (data->res.verf->committed != NFS_FILE_SYNC) {
                switch (dreq->flags) {
                        case 0:
                                memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
                                dreq->flags = NFS_ODIRECT_DO_COMMIT;
                                break;
                        case NFS_ODIRECT_DO_COMMIT:
                                if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
                                        dprintk("NFS: %5u write verify failed\n", task->tk_pid);
                                        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                                }
                }
        }

        spin_unlock(&dreq->lock);
}

/*
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static void nfs_direct_write_release(void *calldata)
{
        struct nfs_write_data *data = calldata;
        struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, data->inode);
}

static const struct rpc_call_ops nfs_write_direct_ops = {
        .rpc_call_done = nfs_direct_write_result,
        .rpc_release = nfs_direct_write_release,
};

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes.  Write length accounting is
 * handled automatically by nfs_direct_write_result().  Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
{
        struct nfs_open_context *ctx = dreq->ctx;
        struct inode *inode = ctx->dentry->d_inode;
        size_t wsize = NFS_SERVER(inode)->wsize;
        unsigned int wpages = nfs_max_pages(wsize);
        unsigned int pgbase;
        int result;
        ssize_t started = 0;

        get_dreq(dreq);

        pgbase = user_addr & ~PAGE_MASK;
        do {
                struct nfs_write_data *data;
                size_t bytes;

                result = -ENOMEM;
                data = nfs_writedata_alloc(wpages);
                if (unlikely(!data))
                        break;

                bytes = wsize;
                if (count < wsize)
                        bytes = count;

                data->npages = nfs_direct_count_pages(user_addr, bytes);
                down_read(&current->mm->mmap_sem);
                result = get_user_pages(current, current->mm, user_addr,
                                        data->npages, 0, 0, data->pagevec, NULL);
                up_read(&current->mm->mmap_sem);
                if (unlikely(result < data->npages)) {
                        if (result > 0)
                                nfs_direct_release_pages(data->pagevec, result);
                        nfs_writedata_release(data);
                        break;
                }

                get_dreq(dreq);

                list_move_tail(&data->pages, &dreq->rewrite_list);

                data->req = (struct nfs_page *) dreq;
                data->inode = inode;
                data->cred = ctx->cred;
                data->args.fh = NFS_FH(inode);
                data->args.context = ctx;
                data->args.offset = pos;
                data->args.pgbase = pgbase;
                data->args.pages = data->pagevec;
                data->args.count = bytes;
                data->res.fattr = &data->fattr;
                data->res.count = bytes;
                data->res.verf = &data->verf;

                rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
                                &nfs_write_direct_ops, data);
                NFS_PROTO(inode)->write_setup(data, sync);

                data->task.tk_priority = RPC_PRIORITY_NORMAL;
                data->task.tk_cookie = (unsigned long) inode;

                lock_kernel();
                rpc_execute(&data->task);
                unlock_kernel();

                dfprintk(VFS, "NFS: %5u initiated direct write call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
                                data->task.tk_pid,
                                inode->i_sb->s_id,
                                (long long)NFS_FILEID(inode),
                                bytes,
                                (unsigned long long)data->args.offset);

                started += bytes;
                user_addr += bytes;
                pos += bytes;
                pgbase += bytes;
                pgbase &= ~PAGE_MASK;

                count -= bytes;
        } while (count != 0);

        if (put_dreq(dreq))
                nfs_direct_write_complete(dreq, inode);

        if (started)
                return 0;
        return result < 0 ? (ssize_t) result : -EFAULT;
}

static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
{
        ssize_t result = 0;
        sigset_t oldset;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct rpc_clnt *clnt = NFS_CLIENT(inode);
        struct nfs_direct_req *dreq;
        size_t wsize = NFS_SERVER(inode)->wsize;
        int sync = 0;

        dreq = nfs_direct_req_alloc();
        if (!dreq)
                return -ENOMEM;
        nfs_alloc_commit_data(dreq);

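        /*
         * If no commit buffer could be allocated, or the request fits
         * in a single wsize'd WRITE, it is necessary (or cheaper) to
         * ask for stable storage up front and skip the commit phase.
         */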
        if (dreq->commit_data == NULL || count < wsize)
                sync = FLUSH_STABLE;

        dreq->inode = inode;
        dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;

        nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);

        nfs_begin_data_update(inode);

        rpc_clnt_sigmask(clnt, &oldset);
        result = nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
        if (!result)
                result = nfs_direct_wait(dreq);
        rpc_clnt_sigunmask(clnt, &oldset);

        return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer into which to read data
 * @count: number of bytes to read
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
        ssize_t retval = -EINVAL;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;

        dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
                file->f_dentry->d_parent->d_name.name,
                file->f_dentry->d_name.name,
                (unsigned long) count, (long long) pos);

        if ((ssize_t) count < 0)
                goto out;
        retval = -EFAULT;
        if (!access_ok(VERIFY_WRITE, buf, count))
                goto out;
        retval = 0;
        if (!count)
                goto out;

        retval = nfs_sync_mapping(mapping);
        if (retval)
                goto out;

        retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos);
        if (retval > 0)
                iocb->ki_pos = pos + retval;

out:
        return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @buf: user's buffer from which to write data
 * @count: number of bytes to write
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We also avoid an unnecessary invocation of generic_osync_inode(),
 * as it is fairly meaningless to sync the metadata of an NFS file.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
        ssize_t retval;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;

        dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
                file->f_dentry->d_parent->d_name.name,
                file->f_dentry->d_name.name,
                (unsigned long) count, (long long) pos);

        retval = generic_write_checks(file, &pos, &count, 0);
        if (retval)
                goto out;

        retval = -EINVAL;
        if ((ssize_t) count < 0)
                goto out;
        retval = 0;
        if (!count)
                goto out;

        retval = -EFAULT;
        if (!access_ok(VERIFY_READ, buf, count))
                goto out;

        retval = nfs_sync_mapping(mapping);
        if (retval)
                goto out;

        retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);

        /*
         * XXX: nfs_end_data_update() already ensures this file's
         *      cached data is subsequently invalidated.  Do we really
         *      need to call invalidate_inode_pages2() again here?
         *
         *      For aio writes, this invalidation will almost certainly
         *      occur before the writes complete.  Kind of racey.
         */
        if (mapping->nrpages)
                invalidate_inode_pages2(mapping);

        if (retval > 0)
                iocb->ki_pos = pos + retval;

out:
        return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
        nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
                                                sizeof(struct nfs_direct_req),
                                                0, (SLAB_RECLAIM_ACCOUNT|
                                                        SLAB_MEM_SPREAD),
                                                NULL, NULL);
        if (nfs_direct_cachep == NULL)
                return -ENOMEM;

        return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void __exit nfs_destroy_directcache(void)
{
        if (kmem_cache_destroy(nfs_direct_cachep))
                printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
}