/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
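/*
 * Time till a connection expires after last use (in seconds).
 */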
static unsigned long rxrpc_connection_timeout = 10 * 60;
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

/*
 * allocate a new client connection bundle
 */
static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
{
        struct rxrpc_conn_bundle *bundle;

        _enter("");

        bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp);
        if (bundle) {
                INIT_LIST_HEAD(&bundle->unused_conns);
                INIT_LIST_HEAD(&bundle->avail_conns);
                INIT_LIST_HEAD(&bundle->busy_conns);
                init_waitqueue_head(&bundle->chanwait);
                atomic_set(&bundle->usage, 1);
        }

        _leave(" = %p", bundle);
        return bundle;
}

/*
 * compare bundle parameters with what we're looking for
 * - return -ve, 0 or +ve
 */
static inline
int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
                     struct key *key, __be16 service_id)
{
        return (bundle->service_id - service_id) ?:
                ((unsigned long) bundle->key - (unsigned long) key);
}

/*
 * get bundle of client connections that a client socket can make use of
 */
struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
                                           struct rxrpc_transport *trans,
                                           struct key *key,
                                           __be16 service_id,
                                           gfp_t gfp)
{
        struct rxrpc_conn_bundle *bundle, *candidate;
        struct rb_node *p, *parent, **pp;

        _enter("%p{%x},%x,%hx,",
               rx, key_serial(key), trans->debug_id, ntohs(service_id));

        if (rx->trans == trans && rx->bundle) {
                atomic_inc(&rx->bundle->usage);
                return rx->bundle;
        }

        /* search the extant bundles first for one that matches the specified
         * user ID */
        spin_lock(&trans->client_lock);

        p = trans->bundles.rb_node;
        while (p) {
                bundle = rb_entry(p, struct rxrpc_conn_bundle, node);

                if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
                        p = p->rb_left;
                else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
                        p = p->rb_right;
                else
                        goto found_extant_bundle;
        }

        spin_unlock(&trans->client_lock);

        /* not yet present - create a candidate for a new record and then
         * redo the search */
        candidate = rxrpc_alloc_bundle(gfp);
        if (!candidate) {
                _leave(" = -ENOMEM");
                return ERR_PTR(-ENOMEM);
        }

        candidate->key = key_get(key);
        candidate->service_id = service_id;

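        /* The client lock was dropped while the candidate was allocated, so
         * another thread may have installed a matching bundle in the
         * meantime; repeat the search before inserting the candidate. */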
        spin_lock(&trans->client_lock);

        pp = &trans->bundles.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);

                if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
                        pp = &(*pp)->rb_left;
                else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
                        pp = &(*pp)->rb_right;
                else
                        goto found_extant_second;
        }

        /* second search also failed; add the new bundle */
        bundle = candidate;
        candidate = NULL;

        rb_link_node(&bundle->node, parent, pp);
        rb_insert_color(&bundle->node, &trans->bundles);
        spin_unlock(&trans->client_lock);
        _net("BUNDLE new on trans %d", trans->debug_id);
        if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
                atomic_inc(&bundle->usage);
                rx->bundle = bundle;
        }
        _leave(" = %p [new]", bundle);
        return bundle;

        /* we found the bundle in the list immediately */
found_extant_bundle:
        atomic_inc(&bundle->usage);
        spin_unlock(&trans->client_lock);
        _net("BUNDLE old on trans %d", trans->debug_id);
        if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
                atomic_inc(&bundle->usage);
                rx->bundle = bundle;
        }
        _leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
        return bundle;

        /* we found the bundle on the second time through the list */
found_extant_second:
        atomic_inc(&bundle->usage);
        spin_unlock(&trans->client_lock);
        kfree(candidate);
        _net("BUNDLE old2 on trans %d", trans->debug_id);
        if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
                atomic_inc(&bundle->usage);
                rx->bundle = bundle;
        }
        _leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
        return bundle;
}

/*
 * release a bundle
 */
void rxrpc_put_bundle(struct rxrpc_transport *trans,
                      struct rxrpc_conn_bundle *bundle)
{
        _enter("%p,%p{%d}",trans, bundle, atomic_read(&bundle->usage));

        if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
                _debug("Destroy bundle");
                rb_erase(&bundle->node, &trans->bundles);
                spin_unlock(&trans->client_lock);
                ASSERT(list_empty(&bundle->unused_conns));
                ASSERT(list_empty(&bundle->avail_conns));
                ASSERT(list_empty(&bundle->busy_conns));
                ASSERTCMP(bundle->num_conns, ==, 0);
                key_put(bundle->key);
                kfree(bundle);
        }

        _leave("");
}

/*
 * allocate a new connection
 */
static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
        struct rxrpc_connection *conn;

        _enter("");

        conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
        if (conn) {
                INIT_WORK(&conn->processor, &rxrpc_process_connection);
                INIT_LIST_HEAD(&conn->bundle_link);
                conn->calls = RB_ROOT;
                skb_queue_head_init(&conn->rx_queue);
                rwlock_init(&conn->lock);
                spin_lock_init(&conn->state_lock);
                atomic_set(&conn->usage, 1);
                conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
                conn->avail_calls = RXRPC_MAXCALLS;
                conn->size_align = 4;
                conn->header_size = sizeof(struct rxrpc_header);
        }

        _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
        return conn;
}

/*
 * assign a connection ID to a connection and add it to the transport's
 * connection lookup tree
 * - called with transport client lock held
 */
static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
{
        struct rxrpc_connection *xconn;
        struct rb_node *parent, **p;
        __be32 epoch;
        u32 real_conn_id;

        _enter("");

        epoch = conn->epoch;

        write_lock_bh(&conn->trans->conn_lock);

        conn->trans->conn_idcounter += RXRPC_CID_INC;
        if (conn->trans->conn_idcounter < RXRPC_CID_INC)
                conn->trans->conn_idcounter = RXRPC_CID_INC;
        real_conn_id = conn->trans->conn_idcounter;

attempt_insertion:
        parent = NULL;
        p = &conn->trans->client_conns.rb_node;

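        /* The client connection tree is keyed by { epoch, connection ID };
         * walk it to see whether the proposed ID is already in use,
         * remembering the insertion point if it is not. */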
        while (*p) {
                parent = *p;
                xconn = rb_entry(parent, struct rxrpc_connection, node);

                if (epoch < xconn->epoch)
                        p = &(*p)->rb_left;
                else if (epoch > xconn->epoch)
                        p = &(*p)->rb_right;
                else if (real_conn_id < xconn->real_conn_id)
                        p = &(*p)->rb_left;
                else if (real_conn_id > xconn->real_conn_id)
                        p = &(*p)->rb_right;
                else
                        goto id_exists;
        }

        /* we've found a suitable hole - arrange for this connection to occupy
         * it */
        rb_link_node(&conn->node, parent, p);
        rb_insert_color(&conn->node, &conn->trans->client_conns);

        conn->real_conn_id = real_conn_id;
        conn->cid = htonl(real_conn_id);
        write_unlock_bh(&conn->trans->conn_lock);
        _leave(" [CONNID %x CID %x]", real_conn_id, ntohl(conn->cid));
        return;

        /* we found a connection with the proposed ID - walk the tree from that
         * point looking for the next unused ID */
id_exists:
        for (;;) {
                real_conn_id += RXRPC_CID_INC;
                if (real_conn_id < RXRPC_CID_INC) {
                        real_conn_id = RXRPC_CID_INC;
                        conn->trans->conn_idcounter = real_conn_id;
                        goto attempt_insertion;
                }

                parent = rb_next(parent);
                if (!parent)
                        goto attempt_insertion;

                xconn = rb_entry(parent, struct rxrpc_connection, node);
                if (epoch < xconn->epoch ||
                    real_conn_id < xconn->real_conn_id)
                        goto attempt_insertion;
        }
}

/*
 * add a call to a connection's call-by-ID tree
 */
static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
                                      struct rxrpc_call *call)
{
        struct rxrpc_call *xcall;
        struct rb_node *parent, **p;
        __be32 call_id;

        write_lock_bh(&conn->lock);

        call_id = call->call_id;
        p = &conn->calls.rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;
                xcall = rb_entry(parent, struct rxrpc_call, conn_node);

                if (call_id < xcall->call_id)
                        p = &(*p)->rb_left;
                else if (call_id > xcall->call_id)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&call->conn_node, parent, p);
        rb_insert_color(&call->conn_node, &conn->calls);

        write_unlock_bh(&conn->lock);
}

/*
 * connect a call on an exclusive connection
 */
static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
                                   struct rxrpc_transport *trans,
                                   __be16 service_id,
                                   struct rxrpc_call *call,
                                   gfp_t gfp)
{
        struct rxrpc_connection *conn;
        int chan, ret;

        _enter("");

        conn = rx->conn;
        if (!conn) {
                /* not yet present - create a candidate for a new connection
                 * and then redo the check */
                conn = rxrpc_alloc_connection(gfp);
                if (!conn) {
                        _leave(" = -ENOMEM");
                        return -ENOMEM;
                }

                conn->trans = trans;
                conn->bundle = NULL;
                conn->service_id = service_id;
                conn->epoch = rxrpc_epoch;
                conn->in_clientflag = 0;
                conn->out_clientflag = RXRPC_CLIENT_INITIATED;
                conn->cid = 0;
                conn->state = RXRPC_CONN_CLIENT;
                conn->avail_calls = RXRPC_MAXCALLS - 1;
                conn->security_level = rx->min_sec_level;
                conn->key = key_get(rx->key);

                ret = rxrpc_init_client_conn_security(conn);
                if (ret < 0) {
                        key_put(conn->key);
                        kfree(conn);
                        _leave(" = %d [key]", ret);
                        return ret;
                }

                write_lock_bh(&rxrpc_connection_lock);
                list_add_tail(&conn->link, &rxrpc_connections);
                write_unlock_bh(&rxrpc_connection_lock);

                spin_lock(&trans->client_lock);
                atomic_inc(&trans->usage);

                _net("CONNECT EXCL new %d on TRANS %d",
                     conn->debug_id, conn->trans->debug_id);

                rxrpc_assign_connection_id(conn);
                rx->conn = conn;
        } else {
                spin_lock(&trans->client_lock);
        }

        /* we've got a connection with a free channel and we can now attach the
         * call to it
         * - we're holding the transport's client lock
         * - we're holding a reference on the connection
         */
        for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
                if (!conn->channels[chan])
                        goto found_channel;
        goto no_free_channels;

found_channel:
        atomic_inc(&conn->usage);
        conn->channels[chan] = call;
        call->conn = conn;
        call->channel = chan;
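        /* The channel number occupies the lowest bits of the connection ID,
         * so the call's CID is the connection's CID plus the channel. */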
        call->cid = conn->cid | htonl(chan);
        call->call_id = htonl(++conn->call_counter);

        _net("CONNECT client on conn %d chan %d as call %x",
             conn->debug_id, chan, ntohl(call->call_id));

        spin_unlock(&trans->client_lock);

        rxrpc_add_call_ID_to_conn(conn, call);
        _leave(" = 0");
        return 0;

no_free_channels:
        spin_unlock(&trans->client_lock);
        _leave(" = -ENOSR");
        return -ENOSR;
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
                       struct rxrpc_transport *trans,
                       struct rxrpc_conn_bundle *bundle,
                       struct rxrpc_call *call,
                       gfp_t gfp)
{
        struct rxrpc_connection *conn, *candidate;
        int chan, ret;

        DECLARE_WAITQUEUE(myself, current);

        _enter("%p,%lx,", rx, call->user_call_ID);

        if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags))
                return rxrpc_connect_exclusive(rx, trans, bundle->service_id,
                                               call, gfp);

        spin_lock(&trans->client_lock);
        for (;;) {
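                /* A bundle keeps its connections on three lists: avail_conns
                 * (at least one free channel), busy_conns (all channels in
                 * use) and unused_conns (no calls attached yet).  Try an
                 * available connection first, then an unused one, and only
                 * then allocate a new connection. */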
                /* see if the bundle has a call slot available */
                if (!list_empty(&bundle->avail_conns)) {
                        _debug("avail");
                        conn = list_entry(bundle->avail_conns.next,
                                          struct rxrpc_connection,
                                          bundle_link);
                        if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
                                list_del_init(&conn->bundle_link);
                                bundle->num_conns--;
                                continue;
                        }
                        if (--conn->avail_calls == 0)
                                list_move(&conn->bundle_link,
                                          &bundle->busy_conns);
                        ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
                        ASSERT(conn->channels[0] == NULL ||
                               conn->channels[1] == NULL ||
                               conn->channels[2] == NULL ||
                               conn->channels[3] == NULL);
                        atomic_inc(&conn->usage);
                        break;
                }

                if (!list_empty(&bundle->unused_conns)) {
                        _debug("unused");
                        conn = list_entry(bundle->unused_conns.next,
                                          struct rxrpc_connection,
                                          bundle_link);
                        if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
                                list_del_init(&conn->bundle_link);
                                bundle->num_conns--;
                                continue;
                        }
                        ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS);
                        conn->avail_calls = RXRPC_MAXCALLS - 1;
                        ASSERT(conn->channels[0] == NULL &&
                               conn->channels[1] == NULL &&
                               conn->channels[2] == NULL &&
                               conn->channels[3] == NULL);
                        atomic_inc(&conn->usage);
                        list_move(&conn->bundle_link, &bundle->avail_conns);
                        break;
                }

                /* need to allocate a new connection */
                _debug("get new conn [%d]", bundle->num_conns);

                spin_unlock(&trans->client_lock);

                if (signal_pending(current))
                        goto interrupted;

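                /* Cap each bundle at 20 connections (20 * RXRPC_MAXCALLS
                 * concurrent calls); beyond that, fail with EAGAIN unless the
                 * allocation mode permits sleeping, in which case wait for a
                 * channel to become free. */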
                if (bundle->num_conns >= 20) {
                        _debug("too many conns");

                        if (!(gfp & __GFP_WAIT)) {
                                _leave(" = -EAGAIN");
                                return -EAGAIN;
                        }

                        add_wait_queue(&bundle->chanwait, &myself);
                        for (;;) {
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (bundle->num_conns < 20 ||
                                    !list_empty(&bundle->unused_conns) ||
                                    !list_empty(&bundle->avail_conns))
                                        break;
                                if (signal_pending(current))
                                        goto interrupted_dequeue;
                                schedule();
                        }
                        remove_wait_queue(&bundle->chanwait, &myself);
                        __set_current_state(TASK_RUNNING);
                        spin_lock(&trans->client_lock);
                        continue;
                }

                /* not yet present - create a candidate for a new connection and then
                 * redo the check */
                candidate = rxrpc_alloc_connection(gfp);
                if (!candidate) {
                        _leave(" = -ENOMEM");
                        return -ENOMEM;
                }

                candidate->trans = trans;
                candidate->bundle = bundle;
                candidate->service_id = bundle->service_id;
                candidate->epoch = rxrpc_epoch;
                candidate->in_clientflag = 0;
                candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
                candidate->cid = 0;
                candidate->state = RXRPC_CONN_CLIENT;
                candidate->avail_calls = RXRPC_MAXCALLS;
                candidate->security_level = rx->min_sec_level;
                candidate->key = key_get(bundle->key);

                ret = rxrpc_init_client_conn_security(candidate);
                if (ret < 0) {
                        key_put(candidate->key);
                        kfree(candidate);
                        _leave(" = %d [key]", ret);
                        return ret;
                }

                write_lock_bh(&rxrpc_connection_lock);
                list_add_tail(&candidate->link, &rxrpc_connections);
                write_unlock_bh(&rxrpc_connection_lock);

                spin_lock(&trans->client_lock);

                list_add(&candidate->bundle_link, &bundle->unused_conns);
                bundle->num_conns++;
                atomic_inc(&bundle->usage);
                atomic_inc(&trans->usage);

                _net("CONNECT new %d on TRANS %d",
                     candidate->debug_id, candidate->trans->debug_id);

                rxrpc_assign_connection_id(candidate);
                if (candidate->security)
                        candidate->security->prime_packet_security(candidate);

                /* leave the candidate lurking in zombie mode attached to the
                 * bundle until we're ready for it */
                rxrpc_put_connection(candidate);
                candidate = NULL;
        }

        /* we've got a connection with a free channel and we can now attach the
         * call to it
         * - we're holding the transport's client lock
         * - we're holding a reference on the connection
         * - we're holding a reference on the bundle
         */
        for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
                if (!conn->channels[chan])
                        goto found_channel;
        ASSERT(conn->channels[0] == NULL ||
               conn->channels[1] == NULL ||
               conn->channels[2] == NULL ||
               conn->channels[3] == NULL);
        BUG();

found_channel:
        conn->channels[chan] = call;
        call->conn = conn;
        call->channel = chan;
        call->cid = conn->cid | htonl(chan);
        call->call_id = htonl(++conn->call_counter);

        _net("CONNECT client on conn %d chan %d as call %x",
             conn->debug_id, chan, ntohl(call->call_id));

        ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
        spin_unlock(&trans->client_lock);

        rxrpc_add_call_ID_to_conn(conn, call);

        _leave(" = 0");
        return 0;

interrupted_dequeue:
        remove_wait_queue(&bundle->chanwait, &myself);
        __set_current_state(TASK_RUNNING);
interrupted:
        _leave(" = -ERESTARTSYS");
        return -ERESTARTSYS;
}

/*
 * get a record of an incoming connection
 */
struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *trans,
                          struct rxrpc_header *hdr,
                          gfp_t gfp)
{
        struct rxrpc_connection *conn, *candidate = NULL;
        struct rb_node *p, **pp;
        const char *new = "old";
        __be32 epoch;
        u32 conn_id;

        _enter("");

        ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);

        epoch = hdr->epoch;
        conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;

        /* search the connection list first */
        read_lock_bh(&trans->conn_lock);

        p = trans->server_conns.rb_node;
        while (p) {
                conn = rb_entry(p, struct rxrpc_connection, node);

                _debug("maybe %x", conn->real_conn_id);

                if (epoch < conn->epoch)
                        p = p->rb_left;
                else if (epoch > conn->epoch)
                        p = p->rb_right;
                else if (conn_id < conn->real_conn_id)
                        p = p->rb_left;
                else if (conn_id > conn->real_conn_id)
                        p = p->rb_right;
                else
                        goto found_extant_connection;
        }
        read_unlock_bh(&trans->conn_lock);

        /* not yet present - create a candidate for a new record and then
         * redo the search */
        candidate = rxrpc_alloc_connection(gfp);
        if (!candidate) {
                _leave(" = -ENOMEM");
                return ERR_PTR(-ENOMEM);
        }

        candidate->trans = trans;
        candidate->epoch = hdr->epoch;
        candidate->cid = hdr->cid & cpu_to_be32(RXRPC_CIDMASK);
        candidate->service_id = hdr->serviceId;
        candidate->security_ix = hdr->securityIndex;
        candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
        candidate->out_clientflag = 0;
        candidate->real_conn_id = conn_id;
        candidate->state = RXRPC_CONN_SERVER;
        if (candidate->service_id)
                candidate->state = RXRPC_CONN_SERVER_UNSECURED;

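        /* The lock was dropped while the candidate was being allocated, so
         * repeat the search under the write lock in case another CPU recorded
         * the same connection in the meantime. */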
        write_lock_bh(&trans->conn_lock);

        pp = &trans->server_conns.rb_node;
        p = NULL;
        while (*pp) {
                p = *pp;
                conn = rb_entry(p, struct rxrpc_connection, node);

                if (epoch < conn->epoch)
                        pp = &(*pp)->rb_left;
                else if (epoch > conn->epoch)
                        pp = &(*pp)->rb_right;
                else if (conn_id < conn->real_conn_id)
                        pp = &(*pp)->rb_left;
                else if (conn_id > conn->real_conn_id)
                        pp = &(*pp)->rb_right;
                else
                        goto found_extant_second;
        }

        /* we can now add the new candidate to the list */
        conn = candidate;
        candidate = NULL;
        rb_link_node(&conn->node, p, pp);
        rb_insert_color(&conn->node, &trans->server_conns);
        atomic_inc(&conn->trans->usage);

        write_unlock_bh(&trans->conn_lock);

        write_lock_bh(&rxrpc_connection_lock);
        list_add_tail(&conn->link, &rxrpc_connections);
        write_unlock_bh(&rxrpc_connection_lock);

        new = "new";

success:
        _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id);

        _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
        return conn;

        /* we found the connection in the list immediately */
found_extant_connection:
        if (hdr->securityIndex != conn->security_ix) {
                read_unlock_bh(&trans->conn_lock);
                goto security_mismatch;
        }
        atomic_inc(&conn->usage);
        read_unlock_bh(&trans->conn_lock);
        goto success;

        /* we found the connection on the second time through the list */
found_extant_second:
        if (hdr->securityIndex != conn->security_ix) {
                write_unlock_bh(&trans->conn_lock);
                goto security_mismatch;
        }
        atomic_inc(&conn->usage);
        write_unlock_bh(&trans->conn_lock);
        kfree(candidate);
        goto success;

security_mismatch:
        kfree(candidate);
        _leave(" = -EKEYREJECTED");
        return ERR_PTR(-EKEYREJECTED);
}

/*
 * find a connection based on transport and RxRPC connection ID for an incoming
 * packet
 */
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
                                               struct rxrpc_header *hdr)
{
        struct rxrpc_connection *conn;
        struct rb_node *p;
        __be32 epoch;
        u32 conn_id;

        _enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags);

        read_lock_bh(&trans->conn_lock);

        conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
        epoch = hdr->epoch;

        if (hdr->flags & RXRPC_CLIENT_INITIATED)
                p = trans->server_conns.rb_node;
        else
                p = trans->client_conns.rb_node;

        while (p) {
                conn = rb_entry(p, struct rxrpc_connection, node);

                _debug("maybe %x", conn->real_conn_id);

                if (epoch < conn->epoch)
                        p = p->rb_left;
                else if (epoch > conn->epoch)
                        p = p->rb_right;
                else if (conn_id < conn->real_conn_id)
                        p = p->rb_left;
                else if (conn_id > conn->real_conn_id)
                        p = p->rb_right;
                else
                        goto found;
        }

        read_unlock_bh(&trans->conn_lock);
        _leave(" = NULL");
        return NULL;

found:
        atomic_inc(&conn->usage);
        read_unlock_bh(&trans->conn_lock);
        _leave(" = %p", conn);
        return conn;
}

/*
 * release a virtual connection
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
        _enter("%p{u=%d,d=%d}",
               conn, atomic_read(&conn->usage), conn->debug_id);

        ASSERTCMP(atomic_read(&conn->usage), >, 0);

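        /* Record when the last reference went away; the connection is not
         * freed here but left for the reaper, which destroys it once it has
         * sat unused for rxrpc_connection_timeout seconds. */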
        conn->put_time = get_seconds();
        if (atomic_dec_and_test(&conn->usage)) {
                _debug("zombie");
                rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
        }

        _leave("");
}

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
{
        _enter("%p{%d}", conn, atomic_read(&conn->usage));

        ASSERTCMP(atomic_read(&conn->usage), ==, 0);

        _net("DESTROY CONN %d", conn->debug_id);

        if (conn->bundle)
                rxrpc_put_bundle(conn->trans, conn->bundle);

        ASSERT(RB_EMPTY_ROOT(&conn->calls));
        rxrpc_purge_queue(&conn->rx_queue);

        rxrpc_clear_conn_security(conn);
        rxrpc_put_transport(conn->trans);
        kfree(conn);
        _leave("");
}

/*
 * reap dead connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
        struct rxrpc_connection *conn, *_p;
        unsigned long now, earliest, reap_time;

        LIST_HEAD(graveyard);

        _enter("");

        now = get_seconds();
        earliest = ULONG_MAX;

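        /* Under the connection lock, move expired, unreferenced connections
         * onto a local graveyard list and note the earliest expiry time among
         * those that remain; the graveyard is emptied and the reaper
         * rescheduled once the lock has been dropped. */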
        write_lock_bh(&rxrpc_connection_lock);
        list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
                _debug("reap CONN %d { u=%d,t=%ld }",
                       conn->debug_id, atomic_read(&conn->usage),
                       (long) now - (long) conn->put_time);

                if (likely(atomic_read(&conn->usage) > 0))
                        continue;

                spin_lock(&conn->trans->client_lock);
                write_lock(&conn->trans->conn_lock);
                reap_time = conn->put_time + rxrpc_connection_timeout;

                if (atomic_read(&conn->usage) > 0) {
                        ;
                } else if (reap_time <= now) {
                        list_move_tail(&conn->link, &graveyard);
                        if (conn->out_clientflag)
                                rb_erase(&conn->node,
                                         &conn->trans->client_conns);
                        else
                                rb_erase(&conn->node,
                                         &conn->trans->server_conns);
                        if (conn->bundle) {
                                list_del_init(&conn->bundle_link);
                                conn->bundle->num_conns--;
                        }

                } else if (reap_time < earliest) {
                        earliest = reap_time;
                }

                write_unlock(&conn->trans->conn_lock);
                spin_unlock(&conn->trans->client_lock);
        }
        write_unlock_bh(&rxrpc_connection_lock);

        if (earliest != ULONG_MAX) {
                _debug("reschedule reaper %ld", (long) earliest - now);
                ASSERTCMP(earliest, >, now);
                rxrpc_queue_delayed_work(&rxrpc_connection_reap,
                                         (earliest - now) * HZ);
        }

        /* then destroy all those pulled out */
        while (!list_empty(&graveyard)) {
                conn = list_entry(graveyard.next, struct rxrpc_connection,
                                  link);
                list_del_init(&conn->link);

                ASSERTCMP(atomic_read(&conn->usage), ==, 0);
                rxrpc_destroy_connection(conn);
        }

        _leave("");
}

/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
        _enter("");

        rxrpc_connection_timeout = 0;
        cancel_delayed_work(&rxrpc_connection_reap);
        rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);

        _leave("");
}