// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

struct kmem_cache *rxrpc_call_jar;

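/* Limit the number of calls that may be in progress at once.  Calls made by
 * in-kernel services (e.g. the afs filesystem) are accounted separately from
 * calls made through userspace sockets so that neither class can starve the
 * other of slots.
 */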
static DEFINE_SEMAPHORE(rxrpc_call_limiter, 1000);
static DEFINE_SEMAPHORE(rxrpc_kernel_call_limiter, 1000);

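/*
 * Poke a call to have it attended to by the I/O thread: unless the call is
 * already queued or being torn down, take a ref on it, add it to the local
 * endpoint's attend queue and wake the I/O thread.
 */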
void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what)
{
	struct rxrpc_local *local = call->local;
	bool busy;

	if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) {
		spin_lock_bh(&local->lock);
		busy = !list_empty(&call->attend_link);
		trace_rxrpc_poke_call(call, busy, what);
		if (!busy && !rxrpc_try_get_call(call, rxrpc_call_get_poke))
			busy = true;
		if (!busy)
			list_add_tail(&call->attend_link, &local->call_attend_q);
		spin_unlock_bh(&local->lock);
		if (!busy)
			rxrpc_wake_up_io_thread(local);
	}
}

static void rxrpc_call_timer_expired(struct timer_list *t)
{
	struct rxrpc_call *call = from_timer(call, t, timer);

	_enter("%d", call->debug_id);

	if (!__rxrpc_call_is_complete(call)) {
		trace_rxrpc_timer_expired(call, jiffies);
		rxrpc_poke_call(call, rxrpc_call_poke_timer);
	}
}

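/*
 * Bring a call's timer forward.  timer_reduce() only ever shortens the
 * pending expiry, so racing proposals resolve to the earliest deadline.
 */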
void rxrpc_reduce_call_timer(struct rxrpc_call *call,
			     unsigned long expire_at,
			     unsigned long now,
			     enum rxrpc_timer_trace why)
{
	trace_rxrpc_timer(call, why, now);
	timer_reduce(&call->timer, expire_at);
}

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

static void rxrpc_destroy_call(struct work_struct *);

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_get_sendmsg);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, refcount_read(&call->ref));
	return call;
}

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
				    unsigned int debug_id)
{
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	mutex_init(&call->user_mutex);

	/* Prevent lockdep reporting a deadlock false positive between the afs
	 * filesystem and sys_sendmsg() via the mmap sem.
	 */
	if (rx->sk.sk_kern_sock)
		lockdep_set_class(&call->user_mutex,
				  &rxrpc_call_user_mutex_lock_class_key);

	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
	INIT_WORK(&call->destroyer, rxrpc_destroy_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	INIT_LIST_HEAD(&call->attend_link);
	INIT_LIST_HEAD(&call->tx_sendmsg);
	INIT_LIST_HEAD(&call->tx_buffer);
	skb_queue_head_init(&call->recvmsg_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->notify_lock);
	spin_lock_init(&call->tx_lock);
	refcount_set(&call->ref, 1);
	call->debug_id = debug_id;
	call->tx_total_len = -1;
	call->next_rx_timo = 20 * HZ;
	call->next_req_timo = 1 * HZ;
	call->ackr_window = 1;
	call->ackr_wtop = 1;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;

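	/* Pick the initial congestion window size in line with RFC 5681: the
	 * larger the sender MSS, the fewer segments the initial window may
	 * hold.
	 */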
	if (RXRPC_TX_SMSS > 2190)
		call->cong_cwnd = 2;
	else if (RXRPC_TX_SMSS > 1095)
		call->cong_cwnd = 3;
	else
		call->cong_cwnd = 4;
	call->cong_ssthresh = RXRPC_TX_MAX_WINDOW;

	call->rxnet = rxnet;
	call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
	atomic_inc(&rxnet->nr_calls);
	return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct rxrpc_conn_parameters *cp,
						  struct rxrpc_call_params *p,
						  gfp_t gfp,
						  unsigned int debug_id)
{
	struct rxrpc_call *call;
	ktime_t now;
	int ret;

	_enter("");

	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return ERR_PTR(-ENOMEM);
	now = ktime_get_real();
	call->acks_latest_ts = now;
	call->cong_tstamp = now;
	call->dest_srx = cp->peer->srx;
	call->dest_srx.srx_service = cp->service_id;
	call->interruptibility = p->interruptibility;
	call->tx_total_len = p->tx_total_len;
	call->key = key_get(cp->key);
	call->peer = rxrpc_get_peer(cp->peer, rxrpc_peer_get_call);
	call->local = rxrpc_get_local(cp->local, rxrpc_local_get_call);
	call->security_level = cp->security_level;
	if (p->kernel)
		__set_bit(RXRPC_CALL_KERNEL, &call->flags);
	if (cp->upgrade)
		__set_bit(RXRPC_CALL_UPGRADE, &call->flags);
	if (cp->exclusive)
		__set_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);

	/* Clamp caller-supplied timeouts to a floor of one jiffy. */
	if (p->timeouts.normal)
		call->next_rx_timo = max(msecs_to_jiffies(p->timeouts.normal), 1UL);
	if (p->timeouts.idle)
		call->next_req_timo = max(msecs_to_jiffies(p->timeouts.idle), 1UL);
	if (p->timeouts.hard)
		call->hard_timo = p->timeouts.hard * HZ;

	ret = rxrpc_init_client_call_security(call);
	if (ret < 0) {
		rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, ret);
		rxrpc_put_call(call, rxrpc_call_put_discard_error);
		return ERR_PTR(ret);
	}

	rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_CONN);

	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
			 p->user_call_ID, rxrpc_call_new_client);

	_leave(" = %p", call);
	return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	unsigned long now = jiffies;
	unsigned long j = now + MAX_JIFFY_OFFSET;

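	/* Park every event deadline in the far future; each event pulls its
	 * own deadline forward when it is actually proposed.
	 */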
	call->delay_ack_at = j;
	call->ack_lost_at = j;
	call->resend_at = j;
	call->ping_at = j;
	call->keepalive_at = j;
	call->expect_rx_by = j;
	call->expect_req_by = j;
	call->expect_term_by = j + call->hard_timo;
	call->timer.expires = now;
}

/*
 * Wait for a call slot to become available.
 */
static struct semaphore *rxrpc_get_call_slot(struct rxrpc_call_params *p, gfp_t gfp)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (p->kernel)
		limiter = &rxrpc_kernel_call_limiter;
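	/* An uninterruptible caller must be given a slot eventually, so wait
	 * with a plain down(); otherwise let a signal abort the wait.
	 */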
	if (p->interruptibility == RXRPC_UNINTERRUPTIBLE) {
		down(limiter);
		return limiter;
	}
	return down_interruptible(limiter) < 0 ? NULL : limiter;
}

/*
 * Release a call slot.
 */
static void rxrpc_put_call_slot(struct rxrpc_call *call)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (test_bit(RXRPC_CALL_KERNEL, &call->flags))
		limiter = &rxrpc_kernel_call_limiter;
	up(limiter);
}

/*
 * Start the process of connecting a call. We obtain a peer and a connection
 * bundle, but the actual association of a call with a connection is offloaded
 * to the I/O thread to simplify locking.
 */
static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp)
{
	struct rxrpc_local *local = call->local;
	int ret = -ENOMEM;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	ret = rxrpc_look_up_bundle(call, gfp);
	if (ret < 0)
		goto error;

	trace_rxrpc_client(NULL, -1, rxrpc_client_queue_new_call);
	rxrpc_get_call(call, rxrpc_call_get_io_thread);
	spin_lock(&local->client_call_lock);
	list_add_tail(&call->wait_link, &local->new_client_calls);
	spin_unlock(&local->client_call_lock);
	rxrpc_wake_up_io_thread(local);
	return 0;

error:
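	/* The call never got as far as a connection, so mark it disconnected
	 * up front so that the teardown paths don't go looking for one.
	 */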
	__set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	return ret;
}

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct rxrpc_call_params *p,
					 gfp_t gfp,
					 unsigned int debug_id)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet;
	struct semaphore *limiter;
	struct rb_node *parent, **pp;
	int ret;

	_enter("%p,%lx", rx, p->user_call_ID);

	if (WARN_ON_ONCE(!cp->peer)) {
		release_sock(&rx->sk);
		return ERR_PTR(-EIO);
	}

	limiter = rxrpc_get_call_slot(p, gfp);
	if (!limiter) {
		release_sock(&rx->sk);
		return ERR_PTR(-ERESTARTSYS);
	}
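	/* The slot is carried by the call from here on and is handed back in
	 * rxrpc_release_call() via rxrpc_put_call_slot().
	 */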

	call = rxrpc_alloc_client_call(rx, cp, p, gfp, debug_id);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		up(limiter);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	/* We need to protect a partially set up call against the user as we
	 * will be acting outside the socket lock.
	 */
	mutex_lock(&call->user_mutex);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (p->user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (p->user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = p->user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_get_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	spin_lock(&rxnet->call_lock);
	list_add_tail_rcu(&call->link, &rxnet->calls);
	spin_unlock(&rxnet->call_lock);
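	/* The rxnet->calls list is walked under RCU (e.g. by the /proc
	 * interface), hence the _rcu form of the list addition above.
	 */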

	/* From this point on, the call is protected by its own lock. */
	release_sock(&rx->sk);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, gfp);
	if (ret < 0)
		goto error_attached_to_socket;

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock. This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	release_sock(&rx->sk);
	rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EEXIST);
	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), 0,
			 rxrpc_call_see_userid_exists);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put_userid_exists);
	_leave(" = -EEXIST");
	return ERR_PTR(-EEXIST);

	/* We got an error, but the call is attached to the socket and is in
	 * need of release. However, we might now race with recvmsg() when
	 * completion notifies the socket. Return 0 from sys_sendmsg() and
	 * leave the error to recvmsg() to deal with.
	 */
error_attached_to_socket:
	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), ret,
			 rxrpc_call_see_connect_failed);
	rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
	_leave(" = c=%08x [err]", call->debug_id);
	return call;
}

/*
 * Set up an incoming call. call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id = sp->hdr.callNumber;
	call->dest_srx.srx_service = sp->hdr.serviceId;
	call->cid = sp->hdr.cid;
	call->cong_tstamp = skb->tstamp;

	__set_bit(RXRPC_CALL_EXPOSED, &call->flags);
	rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);

	spin_lock(&conn->state_lock);

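	/* The call's initial state follows the connection's security
	 * progress: it waits in SERVER_SECURING until any challenge/response
	 * exchange has completed, and it inherits failure if the connection
	 * has already been aborted.
	 */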
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
	case RXRPC_CONN_SERVICE_CHALLENGING:
		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
		break;
	case RXRPC_CONN_SERVICE:
		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
		break;

	case RXRPC_CONN_ABORTED:
		rxrpc_set_call_completion(call, conn->completion,
					  conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}

	rxrpc_get_call(call, rxrpc_call_get_io_thread);

	/* Set the channel for this call. We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	conn->channels[chan].call = call;
	spin_unlock(&conn->state_lock);

	spin_lock(&conn->peer->lock);
	hlist_add_head(&call->error_link, &conn->peer->error_targets);
	spin_unlock(&conn->peer->lock);

	rxrpc_start_call_timer(call);
	_leave("");
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
{
	if (call) {
		int r = refcount_read(&call->ref);

		trace_rxrpc_call(call->debug_id, r, 0, why);
	}
}

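/*
 * Get a ref on a call unless its refcount has already hit zero, in which
 * case NULL is returned; for use where the call may be being freed
 * concurrently.
 */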
struct rxrpc_call *rxrpc_try_get_call(struct rxrpc_call *call,
				      enum rxrpc_call_trace why)
{
	int r;

	if (!call || !__refcount_inc_not_zero(&call->ref, &r))
		return NULL;
	trace_rxrpc_call(call->debug_id, r + 1, 0, why);
	return call;
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
{
	int r;

	__refcount_inc(&call->ref, &r);
	trace_rxrpc_call(call->debug_id, r + 1, 0, why);
}

/*
 * Clean up the Rx skb ring.
 */
static void rxrpc_cleanup_ring(struct rxrpc_call *call)
{
	rxrpc_purge_queue(&call->recvmsg_queue);
	rxrpc_purge_queue(&call->rx_oos_queue);
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	bool put = false, putu = false;

	_enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));

	trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
			 call->flags, rxrpc_call_see_release);

	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();

	rxrpc_put_call_slot(call);

	/* Make sure we don't get any more notifications */
	spin_lock(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	spin_unlock(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put_unnotify);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		putu = true;
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (putu)
		rxrpc_put_call(call, rxrpc_call_put_userid);

	_leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

	while (!list_empty(&rx->to_be_accepted)) {
		call = list_entry(rx->to_be_accepted.next,
				  struct rxrpc_call, accept_link);
		list_del(&call->accept_link);
		rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
				    rxrpc_abort_call_sock_release_tba);
		rxrpc_put_call(call, rxrpc_call_put_release_sock_tba);
	}

	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_get_release_sock);
		rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
				    rxrpc_abort_call_sock_release);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put_release_sock);
	}

	_leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
{
	struct rxrpc_net *rxnet = call->rxnet;
	unsigned int debug_id = call->debug_id;
	bool dead;
	int r;

	ASSERT(call != NULL);

	dead = __refcount_dec_and_test(&call->ref, &r);
	trace_rxrpc_call(debug_id, r - 1, 0, why);
	if (dead) {
		ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);

		if (!list_empty(&call->link)) {
			spin_lock(&rxnet->call_lock);
			list_del_init(&call->link);
			spin_unlock(&rxnet->call_lock);
		}

		rxrpc_cleanup_call(call);
	}
}

/*
 * Free up the call under RCU.
 */
static void rxrpc_rcu_free_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
	struct rxrpc_net *rxnet = READ_ONCE(call->rxnet);

	kmem_cache_free(rxrpc_call_jar, call);
	if (atomic_dec_and_test(&rxnet->nr_calls))
		wake_up_var(&rxnet->nr_calls);
}

/*
 * Final call destruction - but must be done in process context.
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call = container_of(work, struct rxrpc_call, destroyer);
	struct rxrpc_txbuf *txb;

	del_timer_sync(&call->timer);

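	/* Discard anything left on the receive queues and drop the refs held
	 * on buffers that are still queued for transmission.
	 */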
	rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
	rxrpc_cleanup_ring(call);
	while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
					       struct rxrpc_txbuf, call_link))) {
		list_del(&txb->call_link);
		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
	}
	while ((txb = list_first_entry_or_null(&call->tx_buffer,
					       struct rxrpc_txbuf, call_link))) {
		list_del(&txb->call_link);
		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
	}

	rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);
	rxrpc_put_connection(call->conn, rxrpc_conn_put_call);
	rxrpc_deactivate_bundle(call->bundle);
	rxrpc_put_bundle(call->bundle, rxrpc_bundle_put_call);
	rxrpc_put_peer(call->peer, rxrpc_peer_put_call);
	rxrpc_put_local(call->local, rxrpc_local_put_call);
	call_rcu(&call->rcu, rxrpc_rcu_free_call);
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));

	del_timer(&call->timer);

	if (rcu_read_lock_held())
		/* Can't use the rxrpc workqueue as we need to cancel/flush
		 * something that may be running/waiting there.
		 */
		schedule_work(&call->destroyer);
	else
		rxrpc_destroy_call(&call->destroyer);
}

/*
 * Make sure that all calls are gone from a network namespace. To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
	struct rxrpc_call *call;

	_enter("");

	if (!list_empty(&rxnet->calls)) {
		spin_lock(&rxnet->call_lock);

		while (!list_empty(&rxnet->calls)) {
			call = list_entry(rxnet->calls.next,
					  struct rxrpc_call, link);
			_debug("Zapping call %p", call);

			rxrpc_see_call(call, rxrpc_call_see_zap);
			list_del_init(&call->link);

			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
			       call, refcount_read(&call->ref),
			       rxrpc_call_states[__rxrpc_call_state(call)],
			       call->flags, call->events);

			spin_unlock(&rxnet->call_lock);
			cond_resched();
			spin_lock(&rxnet->call_lock);
		}

		spin_unlock(&rxnet->call_lock);
	}

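	/* Drop the bias placed on nr_calls when the namespace was set up,
	 * then wait for the remaining calls to be destroyed.
	 */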
	atomic_dec(&rxnet->nr_calls);
	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}