// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

struct kmem_cache *rxrpc_call_jar;

static struct semaphore rxrpc_call_limiter =
	__SEMAPHORE_INITIALIZER(rxrpc_call_limiter, 1000);
static struct semaphore rxrpc_kernel_call_limiter =
	__SEMAPHORE_INITIALIZER(rxrpc_kernel_call_limiter, 1000);
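
/* Note: the two limiters above cap how many client calls can be in flight at
 * once - nominally 1000 user-initiated and 1000 kernel-initiated calls, that
 * being what the initialisers request, not a protocol constant.  A slot is
 * taken in rxrpc_get_call_slot() before a client call is set up and is
 * returned by rxrpc_put_call_slot() when the call is released.
 */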

static void rxrpc_call_timer_expired(struct timer_list *t)
{
	struct rxrpc_call *call = from_timer(call, t, timer);

	_enter("%d", call->debug_id);

	if (call->state < RXRPC_CALL_COMPLETE) {
		trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
		rxrpc_queue_call(call);
	}
}

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
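
/* Note: a call returned by rxrpc_find_call_by_user_ID() carries an extra
 * reference, taken under the read lock, so a successful lookup must be
 * balanced by rxrpc_put_call(call, rxrpc_call_put) once the caller is done
 * with it.
 */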

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
				    unsigned int debug_id)
{
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
				    sizeof(struct sk_buff *),
				    gfp);
	if (!call->rxtx_buffer)
		goto nomem;

	call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
	if (!call->rxtx_annotations)
		goto nomem_2;

	mutex_init(&call->user_mutex);

	/* Prevent lockdep reporting a deadlock false positive between the afs
	 * filesystem and sys_sendmsg() via the mmap sem.
	 */
	if (rx->sk.sk_kern_sock)
		lockdep_set_class(&call->user_mutex,
				  &rxrpc_call_user_mutex_lock_class_key);

	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	spin_lock_init(&call->notify_lock);
	spin_lock_init(&call->input_lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = debug_id;
	call->tx_total_len = -1;
	call->next_rx_timo = 20 * HZ;
	call->next_req_timo = 1 * HZ;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	/* Leave space in the ring to handle a maxed-out jumbo packet */
	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;
	call->rx_expect_next = 1;

	call->cong_cwnd = 2;
	call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;

	call->rxnet = rxnet;
	call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
	atomic_inc(&rxnet->nr_calls);
	return call;

nomem_2:
	kfree(call->rxtx_buffer);
nomem:
	kmem_cache_free(rxrpc_call_jar, call);
	return NULL;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp,
						  unsigned int debug_id)
{
	struct rxrpc_call *call;
	ktime_t now;

	_enter("");

	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->service_id = srx->srx_service;
	call->tx_phase = true;
	now = ktime_get_real();
	call->acks_latest_ts = now;
	call->cong_tstamp = now;

	_leave(" = %p", call);
	return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	unsigned long now = jiffies;
	unsigned long j = now + MAX_JIFFY_OFFSET;

	call->ack_at = j;
	call->ack_lost_at = j;
	call->resend_at = j;
	call->ping_at = j;
	call->expect_rx_by = j;
	call->expect_req_by = j;
	call->expect_term_by = j;
	call->timer.expires = now;
}
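
/* Note: now + MAX_JIFFY_OFFSET is the farthest future time expressible in
 * jiffies, so the assignments above effectively arm each deadline as "never";
 * the individual deadlines are pulled closer later as ACKs, pings and resends
 * actually get scheduled.
 */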

/*
 * Wait for a call slot to become available.
 */
static struct semaphore *rxrpc_get_call_slot(struct rxrpc_call_params *p, gfp_t gfp)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (p->kernel)
		limiter = &rxrpc_kernel_call_limiter;
	if (p->interruptibility == RXRPC_UNINTERRUPTIBLE) {
		down(limiter);
		return limiter;
	}
	return down_interruptible(limiter) < 0 ? NULL : limiter;
}

/*
 * Release a call slot.
 */
static void rxrpc_put_call_slot(struct rxrpc_call *call)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (test_bit(RXRPC_CALL_KERNEL, &call->flags))
		limiter = &rxrpc_kernel_call_limiter;
	up(limiter);
}
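
/* Note: rxrpc_get_call_slot() returns NULL only when an interruptible wait is
 * broken by a signal, which the caller maps to -ERESTARTSYS.  The gfp
 * argument is accepted but not currently used here.
 */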

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 struct rxrpc_call_params *p,
					 gfp_t gfp,
					 unsigned int debug_id)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet;
	struct semaphore *limiter;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, p->user_call_ID);

	limiter = rxrpc_get_call_slot(p, gfp);
	if (!limiter)
		return ERR_PTR(-ERESTARTSYS);

	call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		up(limiter);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	call->interruptibility = p->interruptibility;
	call->tx_total_len = p->tx_total_len;
	trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
			 atomic_read(&call->usage),
			 here, (const void *)p->user_call_ID);
	if (p->kernel)
		__set_bit(RXRPC_CALL_KERNEL, &call->flags);

	/* We need to protect a partially set up call against the user as we
	 * will be acting outside the socket lock.
	 */
	mutex_lock(&call->user_mutex);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (p->user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (p->user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = p->user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	/* From this point on, the call is protected by its own lock. */
	release_sock(&rx->sk);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
	if (ret < 0)
		goto error_attached_to_socket;

	trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
			 atomic_read(&call->usage), here, NULL);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	release_sock(&rx->sk);
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, -EEXIST);
	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
			 atomic_read(&call->usage), here, ERR_PTR(-EEXIST));
	rxrpc_release_call(rx, call);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = -EEXIST");
	return ERR_PTR(-EEXIST);

	/* We got an error, but the call is attached to the socket and is in
	 * need of release.  However, we might now race with recvmsg() when
	 * completing the call queues it.  Return 0 from sys_sendmsg() and
	 * leave the error to recvmsg() to deal with.
	 */
error_attached_to_socket:
	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
			 atomic_read(&call->usage), here, ERR_PTR(ret));
	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	_leave(" = c=%08x [err]", call->debug_id);
	return call;
}

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id = sp->hdr.callNumber;
	call->service_id = sp->hdr.serviceId;
	call->cid = sp->hdr.cid;
	call->state = RXRPC_CALL_SERVER_SECURING;
	call->cong_tstamp = skb->tstamp;

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	rxrpc_start_call_timer(call);
	_leave("");
}

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_fetch_add_unless(&call->usage, 1, 0);
	if (n == 0)
		return false;
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
				 here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_read(&call->usage);
	ASSERTCMP(n, >=, 1);
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
				 here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	if (call) {
		int n = atomic_read(&call->usage);

		trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
				 here, NULL);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);

	trace_rxrpc_call(call->debug_id, op, n, here, NULL);
}

/*
 * Clean up the RxTx skb ring.
 */
static void rxrpc_cleanup_ring(struct rxrpc_call *call)
{
	int i;

	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
		rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
		call->rxtx_buffer[i] = NULL;
	}
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_connection *conn = call->conn;
	bool put = false;

	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

	trace_rxrpc_call(call->debug_id, rxrpc_call_release,
			 atomic_read(&call->usage),
			 here, (const void *)call->flags);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	rxrpc_put_call_slot(call);

	del_timer_sync(&call->timer);

	/* Make sure we don't get any more notifications */
	write_lock_bh(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	write_unlock_bh(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		rxrpc_disconnect_call(call);
	if (call->security)
		call->security->free_call_crypto(call);
	_leave("");
}
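
/* Note: the RXRPC_CALL_DISCONNECTED flag guards against a double disconnect.
 * It is set on the error path of rxrpc_new_client_call() above when the call
 * never got as far as a connection channel, and presumably by
 * rxrpc_disconnect_call() itself elsewhere in rxrpc; either way, the
 * remaining connection ref is dropped later in rxrpc_destroy_call().
 */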

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

	while (!list_empty(&rx->to_be_accepted)) {
		call = list_entry(rx->to_be_accepted.next,
				  struct rxrpc_call, accept_link);
		list_del(&call->accept_link);
		rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_got);
		rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	_leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	struct rxrpc_net *rxnet = call->rxnet;
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = call->debug_id;
	int n;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(debug_id, op, n, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

		if (!list_empty(&call->link)) {
			write_lock(&rxnet->call_lock);
			list_del_init(&call->link);
			write_unlock(&rxnet->call_lock);
		}

		rxrpc_cleanup_call(call);
	}
}

/*
 * Final call destruction - but must be done in process context.
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
	struct rxrpc_net *rxnet = call->rxnet;

	rxrpc_put_connection(call->conn);
	rxrpc_put_peer(call->peer);
	kfree(call->rxtx_buffer);
	kfree(call->rxtx_annotations);
	kmem_cache_free(rxrpc_call_jar, call);
	if (atomic_dec_and_test(&rxnet->nr_calls))
		wake_up_var(&rxnet->nr_calls);
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	if (in_softirq()) {
		INIT_WORK(&call->processor, rxrpc_destroy_call);
		if (!rxrpc_queue_work(&call->processor))
			BUG();
	} else {
		rxrpc_destroy_call(&call->processor);
	}
}
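
/* Note: RCU callbacks may run in softirq context, where the final puts
 * (rxrpc_put_connection() in particular) are presumably not safe to make, so
 * destruction is bounced to a work item in that case.  Reusing
 * call->processor is fine here as the dead call can no longer be queued for
 * processing.
 */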

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->timer);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));

	rxrpc_cleanup_ring(call);
	rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);

	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
	struct rxrpc_call *call;

	_enter("");

	if (!list_empty(&rxnet->calls)) {
		write_lock(&rxnet->call_lock);

		while (!list_empty(&rxnet->calls)) {
			call = list_entry(rxnet->calls.next,
					  struct rxrpc_call, link);
			_debug("Zapping call %p", call);

			rxrpc_see_call(call);
			list_del_init(&call->link);

			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);

			write_unlock(&rxnet->call_lock);
			cond_resched();
			write_lock(&rxnet->call_lock);
		}

		write_unlock(&rxnet->call_lock);
	}

	atomic_dec(&rxnet->nr_calls);
	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}