/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -   RPC header generation and argument serialization.
 *  -   Credential refresh.
 *  -   TCP reconnect handling (when finished).
 *  -   Retry of an operation when it is suspected the operation failed
 *      because of uid squashing on the server, or when the credentials
 *      are stale and need to be refreshed, or when a packet was damaged
 *      in transit. This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <asm/system.h>

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>

#include <linux/nfs.h>

#define RPC_SLACK_SPACE         512     /* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY        RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);

static void     call_start(struct rpc_task *task);
static void     call_reserve(struct rpc_task *task);
static void     call_reserveresult(struct rpc_task *task);
static void     call_allocate(struct rpc_task *task);
static void     call_encode(struct rpc_task *task);
static void     call_decode(struct rpc_task *task);
static void     call_bind(struct rpc_task *task);
static void     call_transmit(struct rpc_task *task);
static void     call_status(struct rpc_task *task);
static void     call_refresh(struct rpc_task *task);
static void     call_refreshresult(struct rpc_task *task);
static void     call_timeout(struct rpc_task *task);
static void     call_connect(struct rpc_task *task);
static void     call_connect_status(struct rpc_task *task);
static u32 *    call_header(struct rpc_task *task);
static u32 *    call_verify(struct rpc_task *task);

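/*
 * Overview of the normal state sequence for a successful call (a
 * summary of the numbered call_* handlers below; error paths can loop
 * back to earlier states, e.g. call_timeout -> call_bind):
 *
 *      call_start -> call_reserve -> call_reserveresult -> call_allocate
 *        -> call_encode -> call_bind [-> call_connect -> call_connect_status]
 *        -> call_transmit -> call_status -> call_decode
 */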

/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
struct rpc_clnt *
rpc_create_client(struct rpc_xprt *xprt, char *servname,
                  struct rpc_program *program, u32 vers, int flavor)
{
        struct rpc_version      *version;
        struct rpc_clnt         *clnt = NULL;

        dprintk("RPC: creating %s client for %s (xprt %p)\n",
                program->name, servname, xprt);

        if (!xprt)
                goto out;
        if (vers >= program->nrvers || !(version = program->version[vers]))
                goto out;

        clnt = (struct rpc_clnt *) rpc_allocate(0, sizeof(*clnt));
        if (!clnt)
                goto out_no_clnt;
        memset(clnt, 0, sizeof(*clnt));
        atomic_set(&clnt->cl_users, 0);

        clnt->cl_xprt     = xprt;
        clnt->cl_procinfo = version->procs;
        clnt->cl_maxproc  = version->nrprocs;
        clnt->cl_server   = servname;
        clnt->cl_protname = program->name;
        clnt->cl_port     = xprt->addr.sin_port;
        clnt->cl_prog     = program->number;
        clnt->cl_vers     = version->number;
        clnt->cl_prot     = xprt->prot;
        clnt->cl_stats    = program->stats;
        INIT_RPC_WAITQ(&clnt->cl_bindwait, "bindwait");

        if (!clnt->cl_port)
                clnt->cl_autobind = 1;

        rpc_init_rtt(&clnt->cl_rtt, xprt->timeout.to_initval);

        if (!rpcauth_create(flavor, clnt))
                goto out_no_auth;

        /* save the nodename */
        clnt->cl_nodelen = strlen(system_utsname.nodename);
        if (clnt->cl_nodelen > UNX_MAXNODENAME)
                clnt->cl_nodelen = UNX_MAXNODENAME;
        memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
out:
        return clnt;

out_no_clnt:
        printk(KERN_INFO "RPC: out of memory in rpc_create_client\n");
        goto out;
out_no_auth:
        printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %d)\n",
                flavor);
        rpc_free(clnt);
        clnt = NULL;
        goto out;
}
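
/*
 * Minimal usage sketch (illustrative only): a caller normally creates
 * the transport first and then wraps it in a client. The server
 * address, program version and auth flavor below are assumptions:
 *
 *      struct rpc_xprt *xprt;
 *      struct rpc_clnt *clnt;
 *
 *      xprt = xprt_create_proto(IPPROTO_UDP, &server_addr, NULL);
 *      if (xprt == NULL)
 *              return -EIO;
 *      clnt = rpc_create_client(xprt, hostname, &nfs_program,
 *                               2, RPC_AUTH_UNIX);
 *      if (clnt == NULL)
 *              xprt_destroy(xprt);
 */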

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
        dprintk("RPC: shutting down %s client for %s\n",
                clnt->cl_protname, clnt->cl_server);
        while (atomic_read(&clnt->cl_users)) {
#ifdef RPC_DEBUG
                dprintk("RPC: rpc_shutdown_client: client %s, tasks=%d\n",
                        clnt->cl_protname, atomic_read(&clnt->cl_users));
#endif
                /* Don't let rpc_release_client destroy us */
                clnt->cl_oneshot = 0;
                clnt->cl_dead = 0;
                rpc_killall_tasks(clnt);
                sleep_on_timeout(&destroy_wait, 1*HZ);
        }
        return rpc_destroy_client(clnt);
}

/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
        dprintk("RPC: destroying %s client for %s\n",
                        clnt->cl_protname, clnt->cl_server);

        if (clnt->cl_auth) {
                rpcauth_destroy(clnt->cl_auth);
                clnt->cl_auth = NULL;
        }
        if (clnt->cl_xprt) {
                xprt_destroy(clnt->cl_xprt);
                clnt->cl_xprt = NULL;
        }
        rpc_free(clnt);
        return 0;
}

/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
        dprintk("RPC:      rpc_release_client(%p, %d)\n",
                                clnt, atomic_read(&clnt->cl_users));

        if (!atomic_dec_and_test(&clnt->cl_users))
                return;
        wake_up(&destroy_wait);
        if (clnt->cl_oneshot || clnt->cl_dead)
                rpc_destroy_client(clnt);
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task)
{
}

/*
 *      Export the signal mask handling for asynchronous code that
 *      sleeps on RPC calls
 */

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
        unsigned long   sigallow = sigmask(SIGKILL);
        unsigned long   irqflags;

        /* Turn off various signals */
        if (clnt->cl_intr) {
                struct k_sigaction *action = current->sig->action;
                if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
                        sigallow |= sigmask(SIGINT);
                if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
                        sigallow |= sigmask(SIGQUIT);
        }
        spin_lock_irqsave(&current->sigmask_lock, irqflags);
        *oldset = current->blocked;
        siginitsetinv(&current->blocked, sigallow & ~oldset->sig[0]);
        recalc_sigpending(current);
        spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
        unsigned long   irqflags;

        spin_lock_irqsave(&current->sigmask_lock, irqflags);
        current->blocked = *oldset;
        recalc_sigpending(current);
        spin_unlock_irqrestore(&current->sigmask_lock, irqflags);
}
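
/*
 * The pair above is meant to bracket blocking RPC activity, e.g.
 * (a sketch; rpc_call_sync below does exactly this internally):
 *
 *      sigset_t oldset;
 *
 *      rpc_clnt_sigmask(clnt, &oldset);
 *      status = rpc_execute(task);     (or any other blocking call)
 *      rpc_clnt_sigunmask(clnt, &oldset);
 */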

/*
 * Perform a synchronous RPC call
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
        struct rpc_task my_task, *task = &my_task;
        sigset_t        oldset;
        int             status;

        /* If this client is slain all further I/O fails */
        if (clnt->cl_dead)
                return -EIO;

        if (flags & RPC_TASK_ASYNC) {
                printk("rpc_call_sync: Illegal flag combination for synchronous task\n");
                flags &= ~RPC_TASK_ASYNC;
        }

        rpc_clnt_sigmask(clnt, &oldset);

        /* Create/initialize a new RPC task */
        rpc_init_task(task, clnt, NULL, flags);
        rpc_call_setup(task, msg, 0);

        /* Set up the call info struct and execute the task */
        if (task->tk_status == 0)
                status = rpc_execute(task);
        else {
                status = task->tk_status;
                rpc_release_task(task);
        }

        rpc_clnt_sigunmask(clnt, &oldset);

        return status;
}
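
/*
 * Example (a sketch only): issuing a synchronous call. The procedure
 * number and argument/result types are placeholders standing in for a
 * real protocol's definitions:
 *
 *      struct rpc_message msg = {
 *              MYPROC_GETATTR,         (rpc_proc: hypothetical)
 *              &args,                  (rpc_argp)
 *              &res,                   (rpc_resp)
 *              NULL                    (rpc_cred: bind the default cred)
 *      };
 *
 *      status = rpc_call_sync(clnt, &msg, 0);
 */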

/*
 * Perform an asynchronous RPC call
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
               rpc_action callback, void *data)
{
        struct rpc_task *task;
        sigset_t        oldset;
        int             status;

        /* If this client is slain all further I/O fails */
        if (clnt->cl_dead)
                return -EIO;

        flags |= RPC_TASK_ASYNC;

        rpc_clnt_sigmask(clnt, &oldset);

        /* Create/initialize a new RPC task */
        if (!callback)
                callback = rpc_default_callback;
        status = -ENOMEM;
        if (!(task = rpc_new_task(clnt, callback, flags)))
                goto out;
        task->tk_calldata = data;

        rpc_call_setup(task, msg, 0);

        /* Set up the call info struct and execute the task */
        if (task->tk_status == 0)
                status = rpc_execute(task);
        else {
                status = task->tk_status;
                rpc_release_task(task);
        }

out:
        rpc_clnt_sigunmask(clnt, &oldset);

        return status;
}
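
/*
 * Example (a sketch; "my_callback" and "my_data" are hypothetical):
 * the async variant queues the task and returns immediately, and the
 * callback later runs with the completed task as its argument:
 *
 *      static void my_callback(struct rpc_task *task)
 *      {
 *              struct my_data *d = (struct my_data *) task->tk_calldata;
 *              ...
 *      }
 *
 *      status = rpc_call_async(clnt, &msg, 0, my_callback, my_data);
 */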

void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
        task->tk_msg   = *msg;
        task->tk_flags |= flags;
        /* Bind the user cred */
        if (task->tk_msg.rpc_cred != NULL) {
                rpcauth_holdcred(task);
        } else
                rpcauth_bindcred(task);

        if (task->tk_status == 0)
                task->tk_action = call_start;
        else
                task->tk_action = NULL;
}

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
        struct rpc_xprt *xprt = clnt->cl_xprt;

        xprt->sndsize = 0;
        if (sndsize)
                xprt->sndsize = sndsize + RPC_SLACK_SPACE;
        xprt->rcvsize = 0;
        if (rcvsize)
                xprt->rcvsize = rcvsize + RPC_SLACK_SPACE;
        xprt_sock_setbufsize(xprt);
}
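
/*
 * For example, a filesystem might size the socket buffers from its
 * negotiated transfer sizes (values are illustrative; the helper adds
 * RPC_SLACK_SPACE on top of whatever is passed in):
 *
 *      rpc_setbufsize(clnt, wsize + 100, rsize + 100);
 */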

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
        if (RPC_ASSASSINATED(task))
                return;

        task->tk_action = call_start;
}

/*
 * 0.  Initial state
 *
 *     Other FSM states can be visited zero or more times, but
 *     this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;

        /* Valid procedure numbers run from 0 to cl_maxproc - 1 */
        if (task->tk_msg.rpc_proc >= clnt->cl_maxproc) {
                printk(KERN_ERR "%s (vers %d): bad procedure number %d\n",
                                clnt->cl_protname, clnt->cl_vers,
                                task->tk_msg.rpc_proc);
                rpc_exit(task, -EIO);
                return;
        }

        dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
                clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc,
                (RPC_IS_ASYNC(task) ? "async" : "sync"));

        /* Increment call count */
        rpcproc_count(clnt, task->tk_msg.rpc_proc)++;
        clnt->cl_stats->rpccnt++;
        task->tk_action = call_reserve;
}

/*
 * 1.   Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
        dprintk("RPC: %4d call_reserve\n", task->tk_pid);

        if (!rpcauth_uptodatecred(task)) {
                task->tk_action = call_refresh;
                return;
        }

        task->tk_status  = 0;
        task->tk_action  = call_reserveresult;
        xprt_reserve(task);
}

/*
 * 1b.  Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
        int status = task->tk_status;

        dprintk("RPC: %4d call_reserveresult (status %d)\n",
                                task->tk_pid, task->tk_status);

        /*
         * After a call to xprt_reserve(), we must have either
         * a request slot or else an error status.
         */
        task->tk_status = 0;
        if (status >= 0) {
                if (task->tk_rqstp) {
                        task->tk_action = call_allocate;
                        return;
                }

                printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
                                __FUNCTION__, status);
                rpc_exit(task, -EIO);
                return;
        }

        /*
         * Even though there was an error, we may have acquired
         * a request slot somehow.  Make sure not to leak it.
         */
        if (task->tk_rqstp) {
                printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
                                __FUNCTION__, status);
                xprt_release(task);
        }

        switch (status) {
        case -EAGAIN:   /* woken up; retry */
                task->tk_action = call_reserve;
                return;
        case -EIO:      /* probably a shutdown */
                break;
        default:
                printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
                                __FUNCTION__, status);
                break;
        }
        rpc_exit(task, status);
}

/*
 * 2.   Allocate the buffer. For details, see sched.c:rpc_malloc.
 *      (Note: buffer memory is freed in rpc_task_release).
 */
static void
call_allocate(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        unsigned int    bufsiz;

        dprintk("RPC: %4d call_allocate (status %d)\n",
                                task->tk_pid, task->tk_status);
        task->tk_action = call_encode;
        if (task->tk_buffer)
                return;

        /* FIXME: compute buffer requirements more exactly using
         * auth->au_wslack */
        bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc) + RPC_SLACK_SPACE;

        if ((task->tk_buffer = rpc_malloc(task, bufsiz << 1)) != NULL)
                return;
        printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

        if (RPC_IS_ASYNC(task) || !(task->tk_client->cl_intr && signalled())) {
                xprt_release(task);
                task->tk_action = call_reserve;
                rpc_delay(task, HZ>>4);
                return;
        }

        rpc_exit(task, -ERESTARTSYS);
}

/*
 * 3.   Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req = task->tk_rqstp;
        struct xdr_buf *sndbuf = &req->rq_snd_buf;
        struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
        unsigned int    bufsiz;
        kxdrproc_t      encode;
        int             status;
        u32             *p;

        dprintk("RPC: %4d call_encode (status %d)\n",
                                task->tk_pid, task->tk_status);

        task->tk_action = call_bind;

        /* Default buffer setup */
        bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc)+RPC_SLACK_SPACE;
        sndbuf->head[0].iov_base = (void *)task->tk_buffer;
        sndbuf->head[0].iov_len  = bufsiz;
        sndbuf->tail[0].iov_len  = 0;
        sndbuf->page_len         = 0;
        sndbuf->len              = 0;
        rcvbuf->head[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz);
        rcvbuf->head[0].iov_len  = bufsiz;
        rcvbuf->tail[0].iov_len  = 0;
        rcvbuf->page_len         = 0;
        rcvbuf->len              = bufsiz;

        /* Zero buffer so we have automatic zero-padding of opaque & string */
        memset(task->tk_buffer, 0, bufsiz);

        /* Encode header and provided arguments */
        encode = rpcproc_encode(clnt, task->tk_msg.rpc_proc);
        if (!(p = call_header(task))) {
                printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
                rpc_exit(task, -EIO);
        } else
        if (encode && (status = encode(req, p, task->tk_msg.rpc_argp)) < 0) {
                printk(KERN_WARNING "%s: can't encode arguments: %d\n",
                                clnt->cl_protname, -status);
                rpc_exit(task, status);
        }
}
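
/*
 * Resulting layout of task->tk_buffer as prepared by call_allocate and
 * call_encode above: the single (bufsiz << 1) allocation is split
 * evenly between the send and receive sides:
 *
 *      +---------------------+---------------------+
 *      | sndbuf->head[0]     | rcvbuf->head[0]     |
 *      | bufsiz bytes        | bufsiz bytes        |
 *      +---------------------+---------------------+
 *      ^ task->tk_buffer     ^ task->tk_buffer + bufsiz
 */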

/*
 * 4.   Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_xprt *xprt = clnt->cl_xprt;

        dprintk("RPC: %4d call_bind xprt %p %s connected\n", task->tk_pid,
                        xprt, (xprt_connected(xprt) ? "is" : "is not"));

        task->tk_action = (xprt_connected(xprt)) ? call_transmit : call_connect;

        if (!clnt->cl_port) {
                task->tk_action = call_connect;
                task->tk_timeout = clnt->cl_timeout.to_maxval;
                rpc_getport(task, clnt);
        }
}

/*
 * 4a.  Establish socket
 *      Connect to the RPC server (TCP case)
 */
static void
call_connect(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;

        dprintk("RPC: %4d call_connect status %d\n",
                                task->tk_pid, task->tk_status);

        if (xprt_connected(clnt->cl_xprt)) {
                task->tk_action = call_transmit;
                return;
        }
        task->tk_action = call_connect_status;
        if (task->tk_status < 0)
                return;
        xprt_connect(task);
}

/*
 * 4b.  Sort out reconnection result
 */
static void call_connect_status(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        int status = task->tk_status;

        task->tk_status = 0;
        if (status >= 0) {
                clnt->cl_stats->netreconn++;
                task->tk_action = call_transmit;
                return;
        }

        /* Something failed: we may have to rebind */
        if (clnt->cl_autobind)
                clnt->cl_port = 0;
        switch (status) {
        case -ECONNREFUSED:
        case -ECONNRESET:
        case -ENOTCONN:
        case -ETIMEDOUT:
        case -EAGAIN:
                task->tk_action = (clnt->cl_port == 0) ? call_bind : call_connect;
                break;
        default:
                rpc_exit(task, status);
        }
}

/*
 * 5.   Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;

        dprintk("RPC: %4d call_transmit (status %d)\n",
                                task->tk_pid, task->tk_status);

        task->tk_action = call_status;
        if (task->tk_status < 0)
                return;
        xprt_transmit(task);
        if (!rpcproc_decode(clnt, task->tk_msg.rpc_proc) && task->tk_status >= 0) {
                task->tk_action = NULL;
                rpc_wake_up_task(task);
        }
}

/*
 * 6.   Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req = task->tk_rqstp;
        int             status;

        smp_rmb();
        if (req->rq_received > 0 && !req->rq_bytes_sent)
                task->tk_status = req->rq_received;

        dprintk("RPC: %4d call_status (status %d)\n",
                                task->tk_pid, task->tk_status);

        status = task->tk_status;
        if (status >= 0) {
                task->tk_action = call_decode;
                return;
        }

        task->tk_status = 0;
        switch(status) {
        case -ETIMEDOUT:
                task->tk_action = call_timeout;
                break;
        case -ECONNREFUSED:
        case -ENOTCONN:
                req->rq_bytes_sent = 0;
                if (clnt->cl_autobind || !clnt->cl_port) {
                        clnt->cl_port = 0;
                        task->tk_action = call_bind;
                        break;
                }
                task->tk_action = call_connect;
                break;
        case -ENOMEM:
        case -EAGAIN:
                task->tk_action = call_transmit;
                break;
        default:
                if (clnt->cl_chatty)
                        printk("%s: RPC call returned error %d\n",
                               clnt->cl_protname, -status);
                rpc_exit(task, status);
        }
}

/*
 * 6a.  Handle RPC timeout
 *      We do not release the request slot, so we keep using the
 *      same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_timeout *to = &task->tk_rqstp->rq_timeout;

        if (xprt_adjust_timeout(to)) {
                dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
                goto retry;
        }
        to->to_retries = clnt->cl_timeout.to_retries;

        dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
        if (clnt->cl_softrtry) {
                if (clnt->cl_chatty)
                        printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
                                clnt->cl_protname, clnt->cl_server);
                rpc_exit(task, -EIO);
                return;
        }

        if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
                task->tk_flags |= RPC_CALL_MAJORSEEN;
                printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
                        clnt->cl_protname, clnt->cl_server);
        }
        if (clnt->cl_autobind)
                clnt->cl_port = 0;

retry:
        clnt->cl_stats->rpcretrans++;
        task->tk_action = call_bind;
        task->tk_status = 0;
}

/*
 * 7.   Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rqst *req = task->tk_rqstp;
        kxdrproc_t      decode = rpcproc_decode(clnt, task->tk_msg.rpc_proc);
        u32             *p;

        dprintk("RPC: %4d call_decode (status %d)\n",
                                task->tk_pid, task->tk_status);

        if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
                printk(KERN_NOTICE "%s: server %s OK\n",
                        clnt->cl_protname, clnt->cl_server);
                task->tk_flags &= ~RPC_CALL_MAJORSEEN;
        }

        /* A reply shorter than 12 bytes cannot even hold the xid,
         * message type and reply status of a valid RPC reply. */
        if (task->tk_status < 12) {
                if (!clnt->cl_softrtry) {
                        task->tk_action = call_transmit;
                        clnt->cl_stats->rpcretrans++;
                        goto out_retry;
                }
                printk(KERN_WARNING "%s: RPC reply too short (%d bytes)\n",
                        clnt->cl_protname, task->tk_status);
                rpc_exit(task, -EIO);
                return;
        }

        /* Check that the softirq receive buffer is valid */
        if (unlikely(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
                                sizeof(req->rq_rcv_buf)) != 0))
                printk(KERN_WARNING "%s: receive buffer is inconsistent. Please contact maintainer.\n",
                                __FUNCTION__);

        /* Verify the RPC header */
        if (!(p = call_verify(task))) {
                /*
                 * When call_verify sets tk_action to NULL (via rpc_exit)
                 * a non-retryable error has occurred (like the server
                 * not supporting a particular procedure call).
                 */
                if (task->tk_action == NULL)
                        return;
                goto out_retry;
        }
        /*
         * The following is an NFS-specific hack to cater for setuid
         * processes whose uid is mapped to nobody on the server.
         */
        if (task->tk_client->cl_droppriv &&
            (ntohl(*p) == NFSERR_ACCES || ntohl(*p) == NFSERR_PERM)) {
                if (RPC_IS_SETUID(task) && task->tk_suid_retry) {
                        dprintk("RPC: %4d retry squashed uid\n", task->tk_pid);
                        task->tk_flags ^= RPC_CALL_REALUID;
                        task->tk_action = call_encode;
                        task->tk_suid_retry--;
                        goto out_retry;
                }
        }

        task->tk_action = NULL;

        if (decode)
                task->tk_status = decode(req, p, task->tk_msg.rpc_resp);
        dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
                                        task->tk_status);
        return;
out_retry:
        req->rq_received = 0;
        task->tk_status = 0;
}

/*
 * 8.   Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
        dprintk("RPC: %4d call_refresh\n", task->tk_pid);

        xprt_release(task);     /* Must do to obtain new XID */
        task->tk_action = call_refreshresult;
        task->tk_status = 0;
        task->tk_client->cl_stats->rpcauthrefresh++;
        rpcauth_refreshcred(task);
}

/*
 * 8a.  Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
        dprintk("RPC: %4d call_refreshresult (status %d)\n",
                                task->tk_pid, task->tk_status);

        if (task->tk_status < 0)
                rpc_exit(task, -EACCES);
        else
                task->tk_action = call_reserve;
}

/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_xprt *xprt = clnt->cl_xprt;
        struct rpc_rqst *req = task->tk_rqstp;
        u32             *p = req->rq_svec[0].iov_base;

        /* FIXME: check buffer size? */
        if (xprt->stream)
                *p++ = 0;               /* fill in later */
        *p++ = req->rq_xid;             /* XID */
        *p++ = htonl(RPC_CALL);         /* CALL */
        *p++ = htonl(RPC_VERSION);      /* RPC version */
        *p++ = htonl(clnt->cl_prog);    /* program number */
        *p++ = htonl(clnt->cl_vers);    /* program version */
        *p++ = htonl(task->tk_msg.rpc_proc);    /* procedure */
        return rpcauth_marshcred(task, p);
}
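
/*
 * On the wire, the words marshalled above form the standard RPC call
 * header (RFC 1831), followed by the credential and verifier appended
 * by rpcauth_marshcred():
 *
 *      xid | msg_type=CALL | rpcvers=2 | prog | vers | proc | cred | verf
 *
 * For stream transports one extra word is reserved up front for the
 * record marker, which is filled in at transmit time.
 */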

/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
        u32     *p = task->tk_rqstp->rq_rvec[0].iov_base, n;

        p += 1; /* skip XID */

        if ((n = ntohl(*p++)) != RPC_REPLY) {
                printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
                goto garbage;
        }
        if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
                int     error = -EACCES;

                if ((n = ntohl(*p++)) != RPC_AUTH_ERROR) {
                        printk(KERN_WARNING "call_verify: RPC call rejected: %x\n", n);
                } else
                switch ((n = ntohl(*p++))) {
                case RPC_AUTH_REJECTEDCRED:
                case RPC_AUTH_REJECTEDVERF:
                        if (!task->tk_cred_retry)
                                break;
                        task->tk_cred_retry--;
                        dprintk("RPC: %4d call_verify: retry stale creds\n",
                                                        task->tk_pid);
                        rpcauth_invalcred(task);
                        task->tk_action = call_refresh;
                        return NULL;
                case RPC_AUTH_BADCRED:
                case RPC_AUTH_BADVERF:
                        /* possibly garbled cred/verf? */
                        if (!task->tk_garb_retry)
                                break;
                        task->tk_garb_retry--;
                        dprintk("RPC: %4d call_verify: retry garbled creds\n",
                                                        task->tk_pid);
                        task->tk_action = call_encode;
                        return NULL;
                case RPC_AUTH_TOOWEAK:
                        printk(KERN_NOTICE "call_verify: server requires stronger "
                               "authentication.\n");
                        break;
                default:
                        printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
                        error = -EIO;
                }
                dprintk("RPC: %4d call_verify: call rejected %d\n",
                                                task->tk_pid, n);
                rpc_exit(task, error);
                return NULL;
        }
        if (!(p = rpcauth_checkverf(task, p))) {
                printk(KERN_WARNING "call_verify: auth check failed\n");
                goto garbage;           /* bad verifier, retry */
        }
        switch ((n = ntohl(*p++))) {
        case RPC_SUCCESS:
                return p;
        case RPC_PROG_UNAVAIL:
                printk(KERN_WARNING "RPC: call_verify: program %u is unsupported by server %s\n",
                                (unsigned int)task->tk_client->cl_prog,
                                task->tk_client->cl_server);
                goto out_eio;
        case RPC_PROG_MISMATCH:
                printk(KERN_WARNING "RPC: call_verify: program %u, version %u unsupported by server %s\n",
                                (unsigned int)task->tk_client->cl_prog,
                                (unsigned int)task->tk_client->cl_vers,
                                task->tk_client->cl_server);
                goto out_eio;
        case RPC_PROC_UNAVAIL:
                printk(KERN_WARNING "RPC: call_verify: proc %u unsupported by program %u, version %u on server %s\n",
                                (unsigned int)task->tk_msg.rpc_proc,
                                (unsigned int)task->tk_client->cl_prog,
                                (unsigned int)task->tk_client->cl_vers,
                                task->tk_client->cl_server);
                goto out_eio;
        case RPC_GARBAGE_ARGS:
                break;                  /* retry */
        default:
                printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
                /* Also retry */
        }

garbage:
        dprintk("RPC: %4d call_verify: server saw garbage\n", task->tk_pid);
        task->tk_client->cl_stats->rpcgarbage++;
        if (task->tk_garb_retry) {
                task->tk_garb_retry--;
                dprintk("RPC: garbage, retrying %4d\n", task->tk_pid);
                task->tk_action = call_encode;
                return NULL;
        }
        printk(KERN_WARNING "RPC: garbage, exit EIO\n");
out_eio:
        rpc_exit(task, -EIO);
        return NULL;
}