net/rxrpc/call_object.c
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
        [RXRPC_CALL_UNINITIALISED]              = "Uninit  ",
        [RXRPC_CALL_CLIENT_AWAIT_CONN]          = "ClWtConn",
        [RXRPC_CALL_CLIENT_SEND_REQUEST]        = "ClSndReq",
        [RXRPC_CALL_CLIENT_AWAIT_REPLY]         = "ClAwtRpl",
        [RXRPC_CALL_CLIENT_RECV_REPLY]          = "ClRcvRpl",
        [RXRPC_CALL_SERVER_PREALLOC]            = "SvPrealc",
        [RXRPC_CALL_SERVER_SECURING]            = "SvSecure",
        [RXRPC_CALL_SERVER_ACCEPTING]           = "SvAccept",
        [RXRPC_CALL_SERVER_RECV_REQUEST]        = "SvRcvReq",
        [RXRPC_CALL_SERVER_ACK_REQUEST]         = "SvAckReq",
        [RXRPC_CALL_SERVER_SEND_REPLY]          = "SvSndRpl",
        [RXRPC_CALL_SERVER_AWAIT_ACK]           = "SvAwtACK",
        [RXRPC_CALL_COMPLETE]                   = "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
        [RXRPC_CALL_SUCCEEDED]                  = "Complete",
        [RXRPC_CALL_REMOTELY_ABORTED]           = "RmtAbort",
        [RXRPC_CALL_LOCALLY_ABORTED]            = "LocAbort",
        [RXRPC_CALL_LOCAL_ERROR]                = "LocError",
        [RXRPC_CALL_NETWORK_ERROR]              = "NetError",
};

struct kmem_cache *rxrpc_call_jar;

static void rxrpc_call_timer_expired(struct timer_list *t)
{
        struct rxrpc_call *call = from_timer(call, t, timer);

        _enter("%d", call->debug_id);

        if (call->state < RXRPC_CALL_COMPLETE) {
                trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
                rxrpc_queue_call(call);
        }
}

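/* A separate lockdep class for the user_mutex of calls made on a kernel
 * socket (applied by the lockdep_set_class() call in rxrpc_alloc_call()
 * below).
 */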
static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

/*
 * find an extant call by user ID
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
                                              unsigned long user_call_ID)
{
        struct rxrpc_call *call;
        struct rb_node *p;

        _enter("%p,%lx", rx, user_call_ID);

        read_lock(&rx->call_lock);

        p = rx->calls.rb_node;
        while (p) {
                call = rb_entry(p, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        p = p->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        p = p->rb_right;
                else
                        goto found_extant_call;
        }

        read_unlock(&rx->call_lock);
        _leave(" = NULL");
        return NULL;

found_extant_call:
        rxrpc_get_call(call, rxrpc_call_got);
        read_unlock(&rx->call_lock);
        _leave(" = %p [%d]", call, atomic_read(&call->usage));
        return call;
}

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
                                    unsigned int debug_id)
{
        struct rxrpc_call *call;
        struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

        call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
        if (!call)
                return NULL;

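        /* The Rx/Tx ring: a fixed-size window of sk_buff pointers with one
         * annotation byte tracking the state of each slot.
         */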
        call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
                                    sizeof(struct sk_buff *),
                                    gfp);
        if (!call->rxtx_buffer)
                goto nomem;

        call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
        if (!call->rxtx_annotations)
                goto nomem_2;

        mutex_init(&call->user_mutex);

        /* Prevent lockdep reporting a deadlock false positive between the afs
         * filesystem and sys_sendmsg() via the mmap sem.
         */
        if (rx->sk.sk_kern_sock)
                lockdep_set_class(&call->user_mutex,
                                  &rxrpc_call_user_mutex_lock_class_key);

        timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
        INIT_WORK(&call->processor, &rxrpc_process_call);
        INIT_LIST_HEAD(&call->link);
        INIT_LIST_HEAD(&call->chan_wait_link);
        INIT_LIST_HEAD(&call->accept_link);
        INIT_LIST_HEAD(&call->recvmsg_link);
        INIT_LIST_HEAD(&call->sock_link);
        init_waitqueue_head(&call->waitq);
        spin_lock_init(&call->lock);
        spin_lock_init(&call->notify_lock);
        spin_lock_init(&call->input_lock);
        rwlock_init(&call->state_lock);
        atomic_set(&call->usage, 1);
        call->debug_id = debug_id;
        call->tx_total_len = -1;
        call->next_rx_timo = 20 * HZ;
        call->next_req_timo = 1 * HZ;

        memset(&call->sock_node, 0xed, sizeof(call->sock_node));

        /* Leave space in the ring to handle a maxed-out jumbo packet */
        call->rx_winsize = rxrpc_rx_window_size;
        call->tx_winsize = 16;
        call->rx_expect_next = 1;

        call->cong_cwnd = 2;
        call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;

        call->rxnet = rxnet;
        atomic_inc(&rxnet->nr_calls);
        return call;

nomem_2:
        kfree(call->rxtx_buffer);
nomem:
        kmem_cache_free(rxrpc_call_jar, call);
        return NULL;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
                                                  struct sockaddr_rxrpc *srx,
                                                  gfp_t gfp,
                                                  unsigned int debug_id)
{
        struct rxrpc_call *call;
        ktime_t now;

        _enter("");

        call = rxrpc_alloc_call(rx, gfp, debug_id);
        if (!call)
                return ERR_PTR(-ENOMEM);
        call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
        call->service_id = srx->srx_service;
        call->tx_phase = true;
        now = ktime_get_real();
        call->acks_latest_ts = now;
        call->cong_tstamp = now;

        _leave(" = %p", call);
        return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
        unsigned long now = jiffies;
        unsigned long j = now + MAX_JIFFY_OFFSET;

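        /* Park every event timeout in the far future (now + MAX_JIFFY_OFFSET);
         * the timer itself is not armed here.
         */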
        call->ack_at = j;
        call->ack_lost_at = j;
        call->resend_at = j;
        call->ping_at = j;
        call->expect_rx_by = j;
        call->expect_req_by = j;
        call->expect_term_by = j;
        call->timer.expires = now;
}

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's user_mutex will need releasing by the
 *   caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                                         struct rxrpc_conn_parameters *cp,
                                         struct sockaddr_rxrpc *srx,
                                         struct rxrpc_call_params *p,
                                         gfp_t gfp,
                                         unsigned int debug_id)
        __releases(&rx->sk.sk_lock.slock)
        __acquires(&call->user_mutex)
{
        struct rxrpc_call *call, *xcall;
        struct rxrpc_net *rxnet;
        struct rb_node *parent, **pp;
        const void *here = __builtin_return_address(0);
        int ret;

        _enter("%p,%lx", rx, p->user_call_ID);

        call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
        if (IS_ERR(call)) {
                release_sock(&rx->sk);
                _leave(" = %ld", PTR_ERR(call));
                return call;
        }

        call->tx_total_len = p->tx_total_len;
        trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
                         here, (const void *)p->user_call_ID);

        /* We need to protect a partially set up call against the user as we
         * will be acting outside the socket lock.
         */
        mutex_lock(&call->user_mutex);

        /* Publish the call, even though it is incompletely set up as yet */
        write_lock(&rx->call_lock);

        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                xcall = rb_entry(parent, struct rxrpc_call, sock_node);

                if (p->user_call_ID < xcall->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (p->user_call_ID > xcall->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto error_dup_user_ID;
        }

        rcu_assign_pointer(call->socket, rx);
        call->user_call_ID = p->user_call_ID;
        __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
        rxrpc_get_call(call, rxrpc_call_got_userid);
        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        list_add(&call->sock_link, &rx->sock_calls);

        write_unlock(&rx->call_lock);

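        /* Add the call to the namespace-wide call list; this is what
         * rxrpc_destroy_all_calls() walks at net-namespace teardown.
         */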
        rxnet = call->rxnet;
        write_lock(&rxnet->call_lock);
        list_add_tail(&call->link, &rxnet->calls);
        write_unlock(&rxnet->call_lock);

        /* From this point on, the call is protected by its own lock. */
        release_sock(&rx->sk);

        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
        ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
        if (ret < 0)
                goto error;

        trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
                         here, NULL);

        rxrpc_start_call_timer(call);

        _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

        _leave(" = %p [new]", call);
        return call;

        /* We unexpectedly found the user ID in the list after taking
         * the call_lock.  This shouldn't happen unless the user races
         * with itself and tries to add the same user ID twice at the
         * same time in different threads.
         */
error_dup_user_ID:
        write_unlock(&rx->call_lock);
        release_sock(&rx->sk);
        ret = -EEXIST;

error:
        __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
                                    RX_CALL_DEAD, ret);
        trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
                         here, ERR_PTR(ret));
        rxrpc_release_call(rx, call);
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
}

/*
 * Retry a call to a new address.  It is expected that the Tx queue of the call
 * will contain data previously packaged for an old call.
 */
int rxrpc_retry_client_call(struct rxrpc_sock *rx,
                            struct rxrpc_call *call,
                            struct rxrpc_conn_parameters *cp,
                            struct sockaddr_rxrpc *srx,
                            gfp_t gfp)
{
        const void *here = __builtin_return_address(0);
        int ret;

        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
        ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
        if (ret < 0)
                goto error;

        trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
                         here, NULL);

        rxrpc_start_call_timer(call);

        _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

        if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
                rxrpc_queue_call(call);

        _leave(" = 0");
        return 0;

error:
        rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
                                  RX_CALL_DEAD, ret);
        trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
                         here, ERR_PTR(ret));
        _leave(" = %d", ret);
        return ret;
}

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
                         struct rxrpc_call *call,
                         struct sk_buff *skb)
{
        struct rxrpc_connection *conn = call->conn;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        u32 chan;

        _enter(",%d", call->conn->debug_id);

        rcu_assign_pointer(call->socket, rx);
        call->call_id           = sp->hdr.callNumber;
        call->service_id        = sp->hdr.serviceId;
        call->cid               = sp->hdr.cid;
        call->state             = RXRPC_CALL_SERVER_ACCEPTING;
        if (sp->hdr.securityIndex > 0)
                call->state     = RXRPC_CALL_SERVER_SECURING;
        call->cong_tstamp       = skb->tstamp;

        /* Set the channel for this call.  We don't get channel_lock as we're
         * only defending against the data_ready handler (which we're called
         * from) and the RESPONSE packet parser (which is only really
         * interested in call_counter and can cope with a disagreement with the
         * call pointer).
         */
        chan = sp->hdr.cid & RXRPC_CHANNELMASK;
        conn->channels[chan].call_counter = call->call_id;
        conn->channels[chan].call_id = call->call_id;
        rcu_assign_pointer(conn->channels[chan].call, call);

        spin_lock(&conn->params.peer->lock);
        hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
        spin_unlock(&conn->params.peer->lock);

        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

        rxrpc_start_call_timer(call);
        _leave("");
}

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
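        /* Only take a ref if the call isn't already dead (usage at zero);
         * otherwise report failure to queue.
         */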
        int n = atomic_fetch_add_unless(&call->usage, 1, 0);
        if (n == 0)
                return false;
        if (rxrpc_queue_work(&call->processor))
                trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_read(&call->usage);
        ASSERTCMP(n, >=, 1);
        if (rxrpc_queue_work(&call->processor))
                trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        if (call) {
                int n = atomic_read(&call->usage);

                trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
        }
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&call->usage);

        trace_rxrpc_call(call, op, n, here, NULL);
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        struct rxrpc_connection *conn = call->conn;
        bool put = false;
        int i;

        _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

        trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
                         here, (const void *)call->flags);

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

        spin_lock_bh(&call->lock);
        if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();
        spin_unlock_bh(&call->lock);

        del_timer_sync(&call->timer);

        /* Make sure we don't get any more notifications */
        write_lock_bh(&rx->recvmsg_lock);

        if (!list_empty(&call->recvmsg_link)) {
                _debug("unlinking once-pending call %p { e=%lx f=%lx }",
                       call, call->events, call->flags);
                list_del(&call->recvmsg_link);
                put = true;
        }

        /* list_empty() must return false in rxrpc_notify_socket() */
        call->recvmsg_link.next = NULL;
        call->recvmsg_link.prev = NULL;

        write_unlock_bh(&rx->recvmsg_lock);
        if (put)
                rxrpc_put_call(call, rxrpc_call_put);

        write_lock(&rx->call_lock);

        if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                rb_erase(&call->sock_node, &rx->calls);
                memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
                rxrpc_put_call(call, rxrpc_call_put_userid);
        }

        list_del(&call->sock_link);
        write_unlock(&rx->call_lock);

        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

        if (conn)
                rxrpc_disconnect_call(call);

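        /* Discard any packets still held in the Rx/Tx ring. */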
        for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
                rxrpc_free_skb(call->rxtx_buffer[i],
                               (call->tx_phase ? rxrpc_skb_tx_cleaned :
                                rxrpc_skb_rx_cleaned));
                call->rxtx_buffer[i] = NULL;
        }

        _leave("");
}

/*
 * Prepare a kernel service call for retry.
 */
int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
        const void *here = __builtin_return_address(0);
        int i;
        u8 last = 0;

        _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

        trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
                         here, (const void *)call->flags);

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
        ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED);
        ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED);
        ASSERT(list_empty(&call->recvmsg_link));

        del_timer_sync(&call->timer);

        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn);

        if (call->conn)
                rxrpc_disconnect_call(call);

        if (rxrpc_is_service_call(call) ||
            !call->tx_phase ||
            call->tx_hard_ack != 0 ||
            call->rx_hard_ack != 0 ||
            call->rx_top != 0)
                return -EINVAL;

        call->state = RXRPC_CALL_UNINITIALISED;
        call->completion = RXRPC_CALL_SUCCEEDED;
        call->call_id = 0;
        call->cid = 0;
        call->cong_cwnd = 0;
        call->cong_extra = 0;
        call->cong_ssthresh = 0;
        call->cong_mode = 0;
        call->cong_dup_acks = 0;
        call->cong_cumul_acks = 0;
        call->acks_lowest_nak = 0;

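        /* Keep only the LAST annotation on each queued Tx packet and mark it
         * for retransmission on the new connection.
         */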
        for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
                last |= call->rxtx_annotations[i];
                call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST;
                call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS;
        }

        _leave(" = 0");
        return 0;
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
        struct rxrpc_call *call;

        _enter("%p", rx);

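        /* Abort any calls that are still queued for userspace to accept. */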
        while (!list_empty(&rx->to_be_accepted)) {
                call = list_entry(rx->to_be_accepted.next,
                                  struct rxrpc_call, accept_link);
                list_del(&call->accept_link);
                rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
                rxrpc_put_call(call, rxrpc_call_put);
        }

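        /* Abort every call still attached to the socket, tell the peer and
         * then detach the call.
         */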
        while (!list_empty(&rx->sock_calls)) {
                call = list_entry(rx->sock_calls.next,
                                  struct rxrpc_call, sock_link);
                rxrpc_get_call(call, rxrpc_call_got);
                rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
                rxrpc_send_abort_packet(call);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put);
        }

        _leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
        struct rxrpc_net *rxnet = call->rxnet;
        const void *here = __builtin_return_address(0);
        int n;

        ASSERT(call != NULL);

        n = atomic_dec_return(&call->usage);
        trace_rxrpc_call(call, op, n, here, NULL);
        ASSERTCMP(n, >=, 0);
        if (n == 0) {
                _debug("call %d dead", call->debug_id);
                ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

                if (!list_empty(&call->link)) {
                        write_lock(&rxnet->call_lock);
                        list_del_init(&call->link);
                        write_unlock(&rxnet->call_lock);
                }

                rxrpc_cleanup_call(call);
        }
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
        struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
        struct rxrpc_net *rxnet = call->rxnet;

        rxrpc_put_peer(call->peer);
        kfree(call->rxtx_buffer);
        kfree(call->rxtx_annotations);
        kmem_cache_free(rxrpc_call_jar, call);
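        /* Wake anyone waiting in rxrpc_destroy_all_calls() once the last call
         * in this namespace is gone.
         */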
        if (atomic_dec_and_test(&rxnet->nr_calls))
                wake_up_var(&rxnet->nr_calls);
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
        int i;

        _net("DESTROY CALL %d", call->debug_id);

        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

        del_timer_sync(&call->timer);

        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
        ASSERTCMP(call->conn, ==, NULL);

        /* Clean up the Rx/Tx buffer */
        for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
                rxrpc_free_skb(call->rxtx_buffer[i],
                               (call->tx_phase ? rxrpc_skb_tx_cleaned :
                                rxrpc_skb_rx_cleaned));

        rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);

        call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
        struct rxrpc_call *call;

        _enter("");

        if (list_empty(&rxnet->calls))
                return;

        write_lock(&rxnet->call_lock);

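        /* Any call still on the namespace list at this point is leaked;
         * report it and drop it from the list.
         */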
        while (!list_empty(&rxnet->calls)) {
                call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
                _debug("Zapping call %p", call);

                rxrpc_see_call(call);
                list_del_init(&call->link);

                pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
                       call, atomic_read(&call->usage),
                       rxrpc_call_states[call->state],
                       call->flags, call->events);

                write_unlock(&rxnet->call_lock);
                cond_resched();
                write_lock(&rxnet->call_lock);
        }

        write_unlock(&rxnet->call_lock);

        atomic_dec(&rxnet->nr_calls);
        wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}