2 * linux/net/sunrpc/rpcclnt.c
4 * This file contains the high-level RPC interface.
5 * It is modeled as a finite state machine to support both synchronous
6 * and asynchronous requests.
8 * - RPC header generation and argument serialization.
9 * - Credential refresh.
10 * - TCP reconnect handling (when finished).
11 * - Retry of operation when it is suspected the operation failed because
12 * of uid squashing on the server, or when the credentials were stale
13 * and need to be refreshed, or when a packet was damaged in transit.
14 * This may have to be moved to the VFS layer.
16 * NB: BSD uses a more intelligent approach to guessing when a request
17 * or reply has been lost by keeping the RTO estimate for each procedure.
18 * We currently make do with a constant timeout value.
20 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
21 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
24 #include <asm/system.h>
26 #include <linux/types.h>
28 #include <linux/slab.h>
30 #include <linux/utsname.h>
32 #include <linux/sunrpc/clnt.h>
34 #include <linux/nfs.h>
37 #define RPC_SLACK_SPACE 512 /* total overkill */
40 # define RPCDBG_FACILITY RPCDBG_CALL
43 static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
46 static void call_start(struct rpc_task *task);
47 static void call_reserve(struct rpc_task *task);
48 static void call_reserveresult(struct rpc_task *task);
49 static void call_allocate(struct rpc_task *task);
50 static void call_encode(struct rpc_task *task);
51 static void call_decode(struct rpc_task *task);
52 static void call_bind(struct rpc_task *task);
53 static void call_transmit(struct rpc_task *task);
54 static void call_status(struct rpc_task *task);
55 static void call_refresh(struct rpc_task *task);
56 static void call_refreshresult(struct rpc_task *task);
57 static void call_timeout(struct rpc_task *task);
58 static void call_connect(struct rpc_task *task);
59 static void call_connect_status(struct rpc_task *);
60 static u32 * call_header(struct rpc_task *task);
61 static u32 * call_verify(struct rpc_task *task);
65 * Create an RPC client
66 * FIXME: This should also take a flags argument (as in task->tk_flags).
67 * It's called (among others) from pmap_create_client, which may in
68 * turn be called by an async task. In this case, rpciod should not be
69 * made to sleep too long.
/*
 * Allocates a struct rpc_clnt, zeroes it, and fills it in from the
 * transport (@xprt), the program table (@program) and the requested
 * version (@vers); an auth handle of the given @flavor is attached via
 * rpcauth_create().  The local nodename (truncated to UNX_MAXNODENAME)
 * is cached for use in AUTH_UNIX credentials.
 *
 * NOTE(review): extraction dropped lines here (braces, NULL checks,
 * goto labels and the final return) — only comments added, tokens
 * left untouched.
 */
72 rpc_create_client(struct rpc_xprt *xprt, char *servname,
73 struct rpc_program *program, u32 vers, int flavor)
75 struct rpc_version *version;
76 struct rpc_clnt *clnt = NULL;
78 dprintk("RPC: creating %s client for %s (xprt %p)\n",
79 program->name, servname, xprt);
/* Reject out-of-range or unregistered program versions. */
83 if (vers >= program->nrvers || !(version = program->version[vers]))
86 clnt = (struct rpc_clnt *) rpc_allocate(0, sizeof(*clnt));
89 memset(clnt, 0, sizeof(*clnt));
90 atomic_set(&clnt->cl_users, 0);
/* Copy per-version and per-transport parameters into the client. */
93 clnt->cl_procinfo = version->procs;
94 clnt->cl_maxproc = version->nrprocs;
95 clnt->cl_server = servname;
96 clnt->cl_protname = program->name;
97 clnt->cl_port = xprt->addr.sin_port;
98 clnt->cl_prog = program->number;
99 clnt->cl_vers = version->number;
100 clnt->cl_prot = xprt->prot;
101 clnt->cl_stats = program->stats;
102 INIT_RPC_WAITQ(&clnt->cl_bindwait, "bindwait");
/* presumably guarded by "if (!clnt->cl_port)" in the lost line 104 —
 * autobind means the portmapper will be queried at call time. */
105 clnt->cl_autobind = 1;
107 rpc_init_rtt(&clnt->cl_rtt, xprt->timeout.to_initval);
109 if (!rpcauth_create(flavor, clnt))
112 /* save the nodename */
113 clnt->cl_nodelen = strlen(system_utsname.nodename);
114 if (clnt->cl_nodelen > UNX_MAXNODENAME)
115 clnt->cl_nodelen = UNX_MAXNODENAME;
116 memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
/* Error paths (labels lost in extraction): allocation failure and
 * auth-creation failure each log and bail out. */
121 printk(KERN_INFO "RPC: out of memory in rpc_create_client\n");
124 printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %d)\n",
132 * Properly shut down an RPC client, terminating all outstanding
133 * requests. Note that we must be certain that cl_oneshot and
134 * cl_dead are cleared, or else the client would be destroyed
135 * when the last task releases it.
/*
 * Loops until cl_users drops to zero, killing all tasks bound to the
 * client and sleeping up to 1s on destroy_wait between attempts, then
 * hands off to rpc_destroy_client().
 */
138 rpc_shutdown_client(struct rpc_clnt *clnt)
140 dprintk("RPC: shutting down %s client for %s\n",
141 clnt->cl_protname, clnt->cl_server);
142 while (atomic_read(&clnt->cl_users)) {
144 dprintk("RPC: rpc_shutdown_client: client %s, tasks=%d\n",
145 clnt->cl_protname, atomic_read(&clnt->cl_users));
147 /* Don't let rpc_release_client destroy us */
148 clnt->cl_oneshot = 0;
/* presumably "clnt->cl_dead = 0;" occupied the lost line 149 —
 * TODO confirm against upstream. */
150 rpc_killall_tasks(clnt);
151 sleep_on_timeout(&destroy_wait, 1*HZ);
153 return rpc_destroy_client(clnt);
157 * Delete an RPC client
/*
 * Tears down the auth handle and the transport, NULLing the pointers
 * so a second destroy cannot double-free.  NOTE(review): the guard
 * conditions ("if (clnt->cl_auth)" / "if (clnt->cl_xprt)") and the
 * final free of clnt itself were lost in extraction.
 */
160 rpc_destroy_client(struct rpc_clnt *clnt)
162 dprintk("RPC: destroying %s client for %s\n",
163 clnt->cl_protname, clnt->cl_server);
166 rpcauth_destroy(clnt->cl_auth);
167 clnt->cl_auth = NULL;
170 xprt_destroy(clnt->cl_xprt);
171 clnt->cl_xprt = NULL;
178 * Release an RPC client
/*
 * Drops one reference on cl_users.  When the count hits zero, wakes
 * anybody sleeping in rpc_shutdown_client() and, for one-shot or dead
 * clients, destroys the client outright.
 */
181 rpc_release_client(struct rpc_clnt *clnt)
183 dprintk("RPC: rpc_release_client(%p, %d)\n",
184 clnt, atomic_read(&clnt->cl_users));
/* Not the last reference: nothing more to do (early return lost). */
186 if (!atomic_dec_and_test(&clnt->cl_users))
188 wake_up(&destroy_wait);
189 if (clnt->cl_oneshot || clnt->cl_dead)
190 rpc_destroy_client(clnt);
194 * Default callback for async RPC calls
/* Used by rpc_call_async() when the caller passes a NULL callback;
 * body (empty) lost in extraction. */
197 rpc_default_callback(struct rpc_task *task)
202 * Export the signal mask handling for aysnchronous code that
203 * sleeps on RPC calls
206 void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
208 unsigned long sigallow = sigmask(SIGKILL);
209 unsigned long irqflags;
211 /* Turn off various signals */
213 struct k_sigaction *action = current->sig->action;
214 if (action[SIGINT-1].sa.sa_handler == SIG_DFL)
215 sigallow |= sigmask(SIGINT);
216 if (action[SIGQUIT-1].sa.sa_handler == SIG_DFL)
217 sigallow |= sigmask(SIGQUIT);
219 spin_lock_irqsave(¤t->sigmask_lock, irqflags);
220 *oldset = current->blocked;
221 siginitsetinv(¤t->blocked, sigallow & ~oldset->sig[0]);
222 recalc_sigpending(current);
223 spin_unlock_irqrestore(¤t->sigmask_lock, irqflags);
226 void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
228 unsigned long irqflags;
230 spin_lock_irqsave(¤t->sigmask_lock, irqflags);
231 current->blocked = *oldset;
232 recalc_sigpending(current);
233 spin_unlock_irqrestore(¤t->sigmask_lock, irqflags);
237 * New rpc_call implementation
/*
 * Perform a synchronous RPC: build an on-stack rpc_task, execute it,
 * and return its final status.  Signals are masked for the duration
 * via rpc_clnt_sigmask()/rpc_clnt_sigunmask().
 * NOTE(review): declarations of "status"/"oldset", the cl_dead check
 * and the final return were lost in extraction.
 */
239 int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
241 struct rpc_task my_task, *task = &my_task;
245 /* If this client is slain all further I/O fails */
/* Async flag is invalid for the synchronous entry point: warn and strip it. */
249 if (flags & RPC_TASK_ASYNC) {
250 printk("rpc_call_sync: Illegal flag combination for synchronous task\n");
251 flags &= ~RPC_TASK_ASYNC;
254 rpc_clnt_sigmask(clnt, &oldset);
256 /* Create/initialize a new RPC task */
257 rpc_init_task(task, clnt, NULL, flags);
258 rpc_call_setup(task, msg, 0);
260 /* Set up the call info struct and execute the task */
261 if (task->tk_status == 0)
262 status = rpc_execute(task);
/* else-branch: propagate the setup failure (else line lost). */
264 status = task->tk_status;
265 rpc_release_task(task);
268 rpc_clnt_sigunmask(clnt, &oldset);
274 * New rpc_call implementation
/*
 * Perform an asynchronous RPC: allocate a task via rpc_new_task(),
 * attach @data as tk_calldata, and kick it off with rpc_execute().
 * A NULL @callback is replaced by rpc_default_callback.
 * NOTE(review): braces, the cl_dead check, error paths and the final
 * return were lost in extraction.
 */
277 rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
278 rpc_action callback, void *data)
280 struct rpc_task *task;
284 /* If this client is slain all further I/O fails */
288 flags |= RPC_TASK_ASYNC;
290 rpc_clnt_sigmask(clnt, &oldset);
292 /* Create/initialize a new RPC task */
294 callback = rpc_default_callback;
296 if (!(task = rpc_new_task(clnt, callback, flags)))
298 task->tk_calldata = data;
300 rpc_call_setup(task, msg, 0);
302 /* Set up the call info struct and execute the task */
303 if (task->tk_status == 0)
304 status = rpc_execute(task);
306 status = task->tk_status;
307 rpc_release_task(task);
311 rpc_clnt_sigunmask(clnt, &oldset);
/*
 * Attach the rpc_message to the task, OR in the caller's extra flags,
 * and bind credentials: hold the caller-supplied cred when present,
 * otherwise look one up via rpcauth_bindcred().  On success the FSM
 * entry point call_start becomes the task's next action; on failure
 * tk_action is cleared so the task exits immediately.
 */
318 rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
321 task->tk_flags |= flags;
322 /* Bind the user cred */
323 if (task->tk_msg.rpc_cred != NULL) {
324 rpcauth_holdcred(task);
/* else-branch (line lost): no cred supplied, look one up. */
326 rpcauth_bindcred(task);
328 if (task->tk_status == 0)
329 task->tk_action = call_start;
/* else-branch (line lost): abort the task. */
331 task->tk_action = NULL;
/*
 * Set the transport socket's send/receive buffer sizes, padding each
 * by RPC_SLACK_SPACE before pushing them to the socket layer.
 * (The guards that skip a zero sndsize/rcvsize were lost in
 * extraction — TODO confirm against upstream.)
 */
335 rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
337 struct rpc_xprt *xprt = clnt->cl_xprt;
341 xprt->sndsize = sndsize + RPC_SLACK_SPACE;
344 xprt->rcvsize = rcvsize + RPC_SLACK_SPACE;
345 xprt_sock_setbufsize(xprt);
349 * Restart an (async) RPC call. Usually called from within the
/*
 * Re-enters the FSM at call_start unless the task has been
 * assassinated (RPC_ASSASSINATED early-return line lost).
 */
353 rpc_restart_call(struct rpc_task *task)
355 if (RPC_ASSASSINATED(task))
358 task->tk_action = call_start;
364 * Other FSM states can be visited zero or more times, but
365 * this state is visited exactly once for each RPC.
/*
 * FSM state 0: validate the procedure number, bump the per-procedure
 * and per-client call counters, then proceed to call_reserve.
 */
368 call_start(struct rpc_task *task)
370 struct rpc_clnt *clnt = task->tk_client;
/* Out-of-range procedure: log and abort the task with -EIO. */
372 if (task->tk_msg.rpc_proc > clnt->cl_maxproc) {
373 printk(KERN_ERR "%s (vers %d): bad procedure number %d\n",
374 clnt->cl_protname, clnt->cl_vers,
375 task->tk_msg.rpc_proc);
376 rpc_exit(task, -EIO);
380 dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
381 clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc,
382 (RPC_IS_ASYNC(task) ? "async" : "sync"));
384 /* Increment call count */
385 rpcproc_count(clnt, task->tk_msg.rpc_proc)++;
386 clnt->cl_stats->rpccnt++;
387 task->tk_action = call_reserve;
391 * 1. Reserve an RPC call slot
/*
 * FSM state 1: if the credential is stale, detour through
 * call_refresh first; otherwise reserve a transport request slot
 * (the xprt_reserve() call itself was lost in extraction) and
 * continue to call_reserveresult.
 */
394 call_reserve(struct rpc_task *task)
396 dprintk("RPC: %4d call_reserve\n", task->tk_pid);
398 if (!rpcauth_uptodatecred(task)) {
399 task->tk_action = call_refresh;
404 task->tk_action = call_reserveresult;
409 * 1b. Grok the result of xprt_reserve()
/*
 * FSM state 1b: interpret the outcome of slot reservation.
 * Success + slot -> call_allocate; success without a slot is a bug
 * (-EIO).  On error, release any slot that leaked through, retry on
 * -EAGAIN, and exit the task for -EIO or anything unrecognized.
 */
412 call_reserveresult(struct rpc_task *task)
414 int status = task->tk_status;
416 dprintk("RPC: %4d call_reserveresult (status %d)\n",
417 task->tk_pid, task->tk_status);
420 * After a call to xprt_reserve(), we must have either
421 * a request slot or else an error status.
425 if (task->tk_rqstp) {
426 task->tk_action = call_allocate;
/* status == 0 but no slot: internal inconsistency. */
430 printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
431 __FUNCTION__, status);
432 rpc_exit(task, -EIO);
437 * Even though there was an error, we may have acquired
438 * a request slot somehow. Make sure not to leak it.
440 if (task->tk_rqstp) {
441 printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
442 __FUNCTION__, status);
/* switch (status) header lost in extraction. */
447 case -EAGAIN: /* woken up; retry */
448 task->tk_action = call_reserve;
450 case -EIO: /* probably a shutdown */
453 printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
454 __FUNCTION__, status);
457 rpc_exit(task, status);
461 * 2. Allocate the buffer. For details, see sched.c:rpc_malloc.
462 * (Note: buffer memory is freed in rpc_task_release).
/*
 * FSM state 2: allocate a single buffer sized for both the send and
 * receive halves (hence "bufsiz << 1").  On allocation failure, retry
 * after a short delay (HZ>>4 jiffies) unless a synchronous,
 * interruptible caller has been signalled, in which case exit with
 * -ERESTARTSYS.
 */
465 call_allocate(struct rpc_task *task)
467 struct rpc_clnt *clnt = task->tk_client;
470 dprintk("RPC: %4d call_allocate (status %d)\n",
471 task->tk_pid, task->tk_status);
472 task->tk_action = call_encode;
476 /* FIXME: compute buffer requirements more exactly using
478 bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc) + RPC_SLACK_SPACE;
/* Success path returns here (return line lost in extraction). */
480 if ((task->tk_buffer = rpc_malloc(task, bufsiz << 1)) != NULL)
482 printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);
484 if (RPC_IS_ASYNC(task) || !(task->tk_client->cl_intr && signalled())) {
486 task->tk_action = call_reserve;
487 rpc_delay(task, HZ>>4);
491 rpc_exit(task, -ERESTARTSYS);
495 * 3. Encode arguments of an RPC call
/*
 * FSM state 3: carve the task buffer into send (first half) and
 * receive (second half) xdr_bufs, zero it so opaque/string XDR gets
 * automatic zero-padding, write the RPC call header, then run the
 * procedure's argument-encode routine.  Errors from either the header
 * or the encoder terminate the task.
 */
498 call_encode(struct rpc_task *task)
500 struct rpc_clnt *clnt = task->tk_client;
501 struct rpc_rqst *req = task->tk_rqstp;
502 struct xdr_buf *sndbuf = &req->rq_snd_buf;
503 struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
509 dprintk("RPC: %4d call_encode (status %d)\n",
510 task->tk_pid, task->tk_status);
512 task->tk_action = call_bind;
514 /* Default buffer setup */
515 bufsiz = rpcproc_bufsiz(clnt, task->tk_msg.rpc_proc)+RPC_SLACK_SPACE;
516 sndbuf->head[0].iov_base = (void *)task->tk_buffer;
517 sndbuf->head[0].iov_len = bufsiz;
518 sndbuf->tail[0].iov_len = 0;
519 sndbuf->page_len = 0;
/* Receive half starts bufsiz bytes into the same allocation. */
521 rcvbuf->head[0].iov_base = (void *)((char *)task->tk_buffer + bufsiz);
522 rcvbuf->head[0].iov_len = bufsiz;
523 rcvbuf->tail[0].iov_len = 0;
524 rcvbuf->page_len = 0;
525 rcvbuf->len = bufsiz;
527 /* Zero buffer so we have automatic zero-padding of opaque & string */
528 memset(task->tk_buffer, 0, bufsiz);
530 /* Encode header and provided arguments */
531 encode = rpcproc_encode(clnt, task->tk_msg.rpc_proc);
532 if (!(p = call_header(task))) {
533 printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
534 rpc_exit(task, -EIO);
/* encode may be NULL for procedures with no arguments. */
536 if (encode && (status = encode(req, p, task->tk_msg.rpc_argp)) < 0) {
537 printk(KERN_WARNING "%s: can't encode arguments: %d\n",
538 clnt->cl_protname, -status);
539 rpc_exit(task, status);
544 * 4. Get the server port number if not yet set
/*
 * FSM state 4: choose transmit vs. connect depending on whether the
 * transport is connected; if no port is known yet, query the remote
 * portmapper via rpc_getport() (which re-enters this state when done).
 */
547 call_bind(struct rpc_task *task)
549 struct rpc_clnt *clnt = task->tk_client;
550 struct rpc_xprt *xprt = clnt->cl_xprt;
552 dprintk("RPC: %4d call_bind xprt %p %s connected\n", task->tk_pid,
553 xprt, (xprt_connected(xprt) ? "is" : "is not"));
555 task->tk_action = (xprt_connected(xprt)) ? call_transmit : call_connect;
557 if (!clnt->cl_port) {
558 task->tk_action = call_connect;
/* Cap the portmap query by the client's maximum timeout. */
559 task->tk_timeout = clnt->cl_timeout.to_maxval;
560 rpc_getport(task, clnt);
565 * 4a. Establish socket
566 * Connect to the RPC server (TCP case)
/*
 * FSM state 4a: skip straight to transmit if already connected;
 * otherwise arrange for call_connect_status to evaluate the outcome
 * (the xprt_connect() call itself was lost in extraction).
 */
569 call_connect(struct rpc_task *task)
571 struct rpc_clnt *clnt = task->tk_client;
573 dprintk("RPC: %4d call_connect status %d\n",
574 task->tk_pid, task->tk_status);
576 if (xprt_connected(clnt->cl_xprt)) {
577 task->tk_action = call_transmit;
580 task->tk_action = call_connect_status;
581 if (task->tk_status < 0)
587 * 4b. Sort out reconnection result
/*
 * FSM state 4b: on success count a reconnect and move to transmit;
 * on failure optionally clear the cached port (autobind) and loop
 * back through call_bind/call_connect, or give up with rpc_exit for
 * non-retryable errors.  (The switch/case scaffolding was lost in
 * extraction.)
 */
589 static void call_connect_status(struct rpc_task *task)
591 struct rpc_clnt *clnt = task->tk_client;
592 int status = task->tk_status;
596 clnt->cl_stats->netreconn++;
597 task->tk_action = call_transmit;
601 /* Something failed: we may have to rebind */
602 if (clnt->cl_autobind)
/* presumably "clnt->cl_port = 0;" occupied the lost line 603. */
610 task->tk_action = (clnt->cl_port == 0) ? call_bind : call_connect;
613 rpc_exit(task, status);
618 * 5. Transmit the RPC request, and wait for reply
/*
 * FSM state 5: send the request (xprt_transmit call lost in
 * extraction) and move to call_status.  Procedures with no decode
 * routine expect no reply payload: finish the task immediately by
 * clearing tk_action and waking it.
 */
621 call_transmit(struct rpc_task *task)
623 struct rpc_clnt *clnt = task->tk_client;
625 dprintk("RPC: %4d call_transmit (status %d)\n",
626 task->tk_pid, task->tk_status);
628 task->tk_action = call_status;
629 if (task->tk_status < 0)
632 if (!rpcproc_decode(clnt, task->tk_msg.rpc_proc) && task->tk_status >= 0) {
633 task->tk_action = NULL;
634 rpc_wake_up_task(task);
639 * 6. Sort out the RPC call status
/*
 * FSM state 6: dispatch on tk_status after transmit.  A fully-sent
 * request with a received reply proceeds to call_decode; timeouts go
 * to call_timeout; connection errors rebind/reconnect (sleeping up to
 * 5s on the transport's sending queue first); ECONNREFUSED-style
 * retries go back to transmit; anything else is fatal.  (The
 * switch/case labels were lost in extraction.)
 */
642 call_status(struct rpc_task *task)
644 struct rpc_clnt *clnt = task->tk_client;
645 struct rpc_xprt *xprt = clnt->cl_xprt;
646 struct rpc_rqst *req = task->tk_rqstp;
/* A reply arrived and nothing remains unsent: adopt its status. */
650 if (req->rq_received > 0 && !req->rq_bytes_sent)
651 task->tk_status = req->rq_received;
653 dprintk("RPC: %4d call_status (status %d)\n",
654 task->tk_pid, task->tk_status);
656 status = task->tk_status;
658 task->tk_action = call_decode;
665 task->tk_action = call_timeout;
/* Connection dropped: restart the transmission from byte 0. */
669 req->rq_bytes_sent = 0;
670 if (clnt->cl_autobind || !clnt->cl_port) {
672 task->tk_action = call_bind;
675 task->tk_action = call_connect;
678 * Sleep and dream of an open connection
680 task->tk_timeout = 5 * HZ;
681 rpc_sleep_on(&xprt->sending, task, NULL, NULL);
684 task->tk_action = call_transmit;
688 printk("%s: RPC call returned error %d\n",
689 clnt->cl_protname, -status);
690 rpc_exit(task, status);
695 * 6a. Handle RPC timeout
696 * We do not release the request slot, so we keep using the
697 * same XID for all retransmits.
/*
 * FSM state 6a: minor timeouts just retransmit; a major timeout on a
 * soft-mounted client fails the call with -EIO, otherwise the "server
 * not responding" notice is printed once (RPC_CALL_MAJORSEEN) and the
 * request is rebound/retransmitted.
 */
700 call_timeout(struct rpc_task *task)
702 struct rpc_clnt *clnt = task->tk_client;
703 struct rpc_timeout *to = &task->tk_rqstp->rq_timeout;
705 if (xprt_adjust_timeout(to)) {
706 dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
/* Major timeout: reset the retry budget for the next round. */
709 to->to_retries = clnt->cl_timeout.to_retries;
711 dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
712 if (clnt->cl_softrtry) {
714 printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
715 clnt->cl_protname, clnt->cl_server);
716 rpc_exit(task, -EIO);
720 if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
721 task->tk_flags |= RPC_CALL_MAJORSEEN;
722 printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
723 clnt->cl_protname, clnt->cl_server);
725 if (clnt->cl_autobind)
/* presumably "clnt->cl_port = 0;" occupied the lost line 726. */
729 clnt->cl_stats->rpcretrans++;
730 task->tk_action = call_bind;
735 * 7. Decode the RPC reply
/*
 * FSM state 7: clear the one-shot "not responding" notice, sanity-
 * check the reply length (an RPC reply header is at least 12 bytes —
 * short replies are retransmitted on hard mounts, -EIO on soft),
 * verify the reply header via call_verify(), apply the setuid-squash
 * retry hack, then run the procedure's decode routine into rpc_resp.
 */
738 call_decode(struct rpc_task *task)
740 struct rpc_clnt *clnt = task->tk_client;
741 struct rpc_rqst *req = task->tk_rqstp;
742 kxdrproc_t decode = rpcproc_decode(clnt, task->tk_msg.rpc_proc);
745 dprintk("RPC: %4d call_decode (status %d)\n",
746 task->tk_pid, task->tk_status);
748 if (clnt->cl_chatty && (task->tk_flags & RPC_CALL_MAJORSEEN)) {
749 printk(KERN_NOTICE "%s: server %s OK\n",
750 clnt->cl_protname, clnt->cl_server);
751 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
754 if (task->tk_status < 12) {
755 if (!clnt->cl_softrtry) {
756 task->tk_action = call_transmit;
757 clnt->cl_stats->rpcretrans++;
760 printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
761 clnt->cl_protname, task->tk_status);
762 rpc_exit(task, -EIO);
766 /* Check that the softirq receive buffer is valid */
767 if (unlikely(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
768 sizeof(req->rq_rcv_buf)) != 0))
769 printk(KERN_WARNING "%s: receive buffer is inconsistent. Please contact maintainer.\n",
772 /* Verify the RPC header */
773 if (!(p = call_verify(task))) {
775 * When call_verify sets tk_action to NULL (via task_exit)
776 * a non-retry-able error has occurred (like the server
777 * not supporting a particular procedure call).
779 if (task->tk_action == NULL)
784 * The following is an NFS-specific hack to cater for setuid
785 * processes whose uid is mapped to nobody on the server.
787 if (task->tk_client->cl_droppriv &&
788 (ntohl(*p) == NFSERR_ACCES || ntohl(*p) == NFSERR_PERM)) {
789 if (RPC_IS_SETUID(task) && task->tk_suid_retry) {
790 dprintk("RPC: %4d retry squashed uid\n", task->tk_pid);
/* Toggle real-uid mode and re-encode the whole request once. */
791 task->tk_flags ^= RPC_CALL_REALUID;
792 task->tk_action = call_encode;
793 task->tk_suid_retry--;
798 task->tk_action = NULL;
/* decode may be assumed non-NULL here: call_transmit finishes
 * no-decode procedures before this state is reached. */
801 task->tk_status = decode(req, p, task->tk_msg.rpc_resp);
802 dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
806 req->rq_received = 0;
811 * 8. Refresh the credentials if rejected by the server
/*
 * FSM state 8: drop the transport slot (forcing a fresh XID), count
 * the refresh, and ask the auth layer to refresh the credential; the
 * outcome is evaluated in call_refreshresult.
 */
814 call_refresh(struct rpc_task *task)
816 dprintk("RPC: %4d call_refresh\n", task->tk_pid);
818 xprt_release(task); /* Must do to obtain new XID */
819 task->tk_action = call_refreshresult;
/* presumably "task->tk_status = 0;" occupied the lost line 820. */
821 task->tk_client->cl_stats->rpcauthrefresh++;
822 rpcauth_refreshcred(task);
826 * 8a. Process the results of a credential refresh
/*
 * FSM state 8a: a failed refresh terminates the call with -EACCES;
 * otherwise (else line lost in extraction) re-enter call_reserve to
 * obtain a new slot with the refreshed credential.
 */
829 call_refreshresult(struct rpc_task *task)
831 dprintk("RPC: %4d call_refreshresult (status %d)\n",
832 task->tk_pid, task->tk_status);
834 if (task->tk_status < 0)
835 rpc_exit(task, -EACCES);
837 task->tk_action = call_reserve;
841 * Call header serialization
/*
 * Write the fixed portion of an RPC call header into the request's
 * first send iovec — record-mark placeholder, XID, CALL, RPC version,
 * program, version, procedure — then let the auth layer marshal the
 * credential and verifier.  Returns the position past the header
 * (NULL from rpcauth_marshcred on failure).
 */
844 call_header(struct rpc_task *task)
846 struct rpc_clnt *clnt = task->tk_client;
847 struct rpc_xprt *xprt = clnt->cl_xprt;
848 struct rpc_rqst *req = task->tk_rqstp;
849 u32 *p = req->rq_svec[0].iov_base;
851 /* FIXME: check buffer size? */
/* First word is the TCP record marker, filled in at transmit time. */
853 *p++ = 0; /* fill in later */
854 *p++ = req->rq_xid; /* XID */
855 *p++ = htonl(RPC_CALL); /* CALL */
856 *p++ = htonl(RPC_VERSION); /* RPC version */
857 *p++ = htonl(clnt->cl_prog); /* program number */
858 *p++ = htonl(clnt->cl_vers); /* program version */
859 *p++ = htonl(task->tk_msg.rpc_proc); /* procedure */
860 return rpcauth_marshcred(task, p);
864 * Reply header verification
/*
 * Parse and validate the RPC reply header in the first receive iovec.
 * Handles message-type and accept-state checks, auth rejections
 * (with bounded retries via tk_cred_retry / tk_garb_retry), verifier
 * checking, and the per-program/version/procedure "unavailable"
 * accept statuses.  Returns a pointer just past the verified header,
 * or NULL after scheduling a retry / exiting the task.
 * NOTE(review): goto targets, "break"s and the error-variable setup
 * were lost in extraction; tokens left untouched.
 */
867 call_verify(struct rpc_task *task)
869 u32 *p = task->tk_rqstp->rq_rvec[0].iov_base, n;
871 p += 1; /* skip XID */
873 if ((n = ntohl(*p++)) != RPC_REPLY) {
874 printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
/* Rejected replies: distinguish auth errors from mismatches. */
877 if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
880 if ((n = ntohl(*p++)) != RPC_AUTH_ERROR) {
881 printk(KERN_WARNING "call_verify: RPC call rejected: %x\n", n);
883 switch ((n = ntohl(*p++))) {
884 case RPC_AUTH_REJECTEDCRED:
885 case RPC_AUTH_REJECTEDVERF:
/* Stale cred/verifier: invalidate and refresh, bounded by
 * tk_cred_retry. */
886 if (!task->tk_cred_retry)
888 task->tk_cred_retry--;
889 dprintk("RPC: %4d call_verify: retry stale creds\n",
891 rpcauth_invalcred(task);
892 task->tk_action = call_refresh;
894 case RPC_AUTH_BADCRED:
895 case RPC_AUTH_BADVERF:
896 /* possibly garbled cred/verf? */
897 if (!task->tk_garb_retry)
899 task->tk_garb_retry--;
900 dprintk("RPC: %4d call_verify: retry garbled creds\n",
902 task->tk_action = call_encode;
904 case RPC_AUTH_TOOWEAK:
905 printk(KERN_NOTICE "call_verify: server requires stronger "
906 "authentication.\n");
909 printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
912 dprintk("RPC: %4d call_verify: call rejected %d\n",
914 rpc_exit(task, error);
/* Accepted reply: check the server's verifier next. */
917 if (!(p = rpcauth_checkverf(task, p))) {
918 printk(KERN_WARNING "call_verify: auth check failed\n");
919 goto garbage; /* bad verifier, retry */
921 switch ((n = ntohl(*p++))) {
924 case RPC_PROG_UNAVAIL:
925 printk(KERN_WARNING "RPC: call_verify: program %u is unsupported by server %s\n",
926 (unsigned int)task->tk_client->cl_prog,
927 task->tk_client->cl_server);
929 case RPC_PROG_MISMATCH:
930 printk(KERN_WARNING "RPC: call_verify: program %u, version %u unsupported by server %s\n",
931 (unsigned int)task->tk_client->cl_prog,
932 (unsigned int)task->tk_client->cl_vers,
933 task->tk_client->cl_server);
935 case RPC_PROC_UNAVAIL:
936 printk(KERN_WARNING "RPC: call_verify: proc %u unsupported by program %u, version %u on server %s\n",
937 (unsigned int)task->tk_msg.rpc_proc,
938 (unsigned int)task->tk_client->cl_prog,
939 (unsigned int)task->tk_client->cl_vers,
940 task->tk_client->cl_server);
942 case RPC_GARBAGE_ARGS:
945 printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
/* "garbage" label (lost): server could not parse our arguments;
 * retry by re-encoding, bounded by tk_garb_retry. */
950 dprintk("RPC: %4d call_verify: server saw garbage\n", task->tk_pid);
951 task->tk_client->cl_stats->rpcgarbage++;
952 if (task->tk_garb_retry) {
953 task->tk_garb_retry--;
954 dprintk(KERN_WARNING "RPC: garbage, retrying %4d\n", task->tk_pid);
955 task->tk_action = call_encode;
958 printk(KERN_WARNING "RPC: garbage, exit EIO\n");
960 rpc_exit(task, -EIO);