/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

/*
 * allocate a new client connection bundle
 */
static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
{
	struct rxrpc_conn_bundle *bundle;

	_enter("");

	bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp);
	if (bundle) {
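		/* a bundle keeps its connections on three lists according to
		 * channel occupancy: unused (no calls attached yet), avail
		 * (at least one of the four channels free) and busy (all
		 * channels in use); connections migrate between the lists as
		 * calls come and go */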
		INIT_LIST_HEAD(&bundle->unused_conns);
		INIT_LIST_HEAD(&bundle->avail_conns);
		INIT_LIST_HEAD(&bundle->busy_conns);
		init_waitqueue_head(&bundle->chanwait);
		atomic_set(&bundle->usage, 1);
	}

	_leave(" = %p", bundle);
	return bundle;
}

/*
 * compare bundle parameters with what we're looking for
 * - return -ve, 0 or +ve
 */
static inline
int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
		     struct key *key, u16 service_id)
{
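	/* order by service ID first; the "a ?: b" extension only falls
	 * through to the key-pointer comparison when the service IDs match */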
	return (bundle->service_id - service_id) ?:
		((unsigned long)bundle->key - (unsigned long)key);
}

/*
 * get bundle of client connections that a client socket can make use of
 */
struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
					   struct rxrpc_transport *trans,
					   struct key *key,
					   u16 service_id,
					   gfp_t gfp)
{
	struct rxrpc_conn_bundle *bundle, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p{%x},%x,%hx,",
	       rx, key_serial(key), trans->debug_id, service_id);

	if (rx->trans == trans && rx->bundle) {
		atomic_inc(&rx->bundle->usage);
		return rx->bundle;
	}

	/* search the extant bundles first for one that matches the specified
	 * user ID */
	spin_lock(&trans->client_lock);

	p = trans->bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_conn_bundle, node);

		if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
			p = p->rb_left;
		else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
			p = p->rb_right;
		else
			goto found_extant_bundle;
	}

	spin_unlock(&trans->client_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
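	/* (the search must be redone because the client lock is dropped
	 *  whilst the candidate is allocated, so a matching bundle may be
	 *  added by someone else in the meantime) */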
	candidate = rxrpc_alloc_bundle(gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->key = key_get(key);
	candidate->service_id = service_id;

	spin_lock(&trans->client_lock);

	pp = &trans->bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);

		if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
			pp = &(*pp)->rb_left;
		else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new bundle */
	bundle = candidate;
	candidate = NULL;

	rb_link_node(&bundle->node, parent, pp);
	rb_insert_color(&bundle->node, &trans->bundles);
	spin_unlock(&trans->client_lock);
	_net("BUNDLE new on trans %d", trans->debug_id);
	if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
		atomic_inc(&bundle->usage);
		rx->bundle = bundle;
	}
	_leave(" = %p [new]", bundle);
	return bundle;

	/* we found the bundle in the list immediately */
found_extant_bundle:
	atomic_inc(&bundle->usage);
	spin_unlock(&trans->client_lock);
	_net("BUNDLE old on trans %d", trans->debug_id);
	if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
		atomic_inc(&bundle->usage);
		rx->bundle = bundle;
	}
	_leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
	return bundle;

	/* we found the bundle on the second time through the list */
found_extant_second:
	atomic_inc(&bundle->usage);
	spin_unlock(&trans->client_lock);
	kfree(candidate);
	_net("BUNDLE old2 on trans %d", trans->debug_id);
	if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
		atomic_inc(&bundle->usage);
		rx->bundle = bundle;
	}
	_leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
	return bundle;
}

/*
 * release a bundle
 */
void rxrpc_put_bundle(struct rxrpc_transport *trans,
		      struct rxrpc_conn_bundle *bundle)
{
	_enter("%p,%p{%d}",trans, bundle, atomic_read(&bundle->usage));

	if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
		_debug("Destroy bundle");
		rb_erase(&bundle->node, &trans->bundles);
		spin_unlock(&trans->client_lock);
		ASSERT(list_empty(&bundle->unused_conns));
		ASSERT(list_empty(&bundle->avail_conns));
		ASSERT(list_empty(&bundle->busy_conns));
		ASSERTCMP(bundle->num_conns, ==, 0);
		key_put(bundle->key);
		kfree(bundle);
	}

	_leave("");
}

/*
 * allocate a new connection
 */
static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->bundle_link);
		conn->calls = RB_ROOT;
		skb_queue_head_init(&conn->rx_queue);
		rwlock_init(&conn->lock);
		spin_lock_init(&conn->state_lock);
		atomic_set(&conn->usage, 1);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->avail_calls = RXRPC_MAXCALLS;
		conn->size_align = 4;
		conn->header_size = sizeof(struct rxrpc_wire_header);
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * assign a connection ID to a connection and add it to the transport's
 * connection lookup tree
 * - called with transport client lock held
 */
static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *xconn;
	struct rb_node *parent, **p;
	__be32 epoch;
	u32 cid;

	_enter("");

	epoch = conn->epoch;

	write_lock_bh(&conn->trans->conn_lock);

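	/* the counter advances in steps of RXRPC_CID_INC, leaving the bottom
	 * bits of the CID free to carry the channel number (see the
	 * "call->cid = conn->cid | chan" assignments below); on wraparound
	 * it restarts at RXRPC_CID_INC rather than handing out a zero CID */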
	conn->trans->conn_idcounter += RXRPC_CID_INC;
	if (conn->trans->conn_idcounter < RXRPC_CID_INC)
		conn->trans->conn_idcounter = RXRPC_CID_INC;
	cid = conn->trans->conn_idcounter;

attempt_insertion:
	parent = NULL;
	p = &conn->trans->client_conns.rb_node;

	while (*p) {
		parent = *p;
		xconn = rb_entry(parent, struct rxrpc_connection, node);

		if (epoch < xconn->epoch)
			p = &(*p)->rb_left;
		else if (epoch > xconn->epoch)
			p = &(*p)->rb_right;
		else if (cid < xconn->cid)
			p = &(*p)->rb_left;
		else if (cid > xconn->cid)
			p = &(*p)->rb_right;
		else
			goto id_exists;
	}

	/* we've found a suitable hole - arrange for this connection to occupy
	 * it */
	rb_link_node(&conn->node, parent, p);
	rb_insert_color(&conn->node, &conn->trans->client_conns);

	conn->cid = cid;
	write_unlock_bh(&conn->trans->conn_lock);
	_leave(" [CID %x]", cid);
	return;

	/* we found a connection with the proposed ID - walk the tree from that
	 * point looking for the next unused ID */
id_exists:
	for (;;) {
		cid += RXRPC_CID_INC;
		if (cid < RXRPC_CID_INC) {
			cid = RXRPC_CID_INC;
			conn->trans->conn_idcounter = cid;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xconn = rb_entry(parent, struct rxrpc_connection, node);
		if (epoch < xconn->epoch ||
		    cid < xconn->cid)
			goto attempt_insertion;
	}
}

/*
 * add a call to a connection's call-by-ID tree
 */
static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
				      struct rxrpc_call *call)
{
	struct rxrpc_call *xcall;
	struct rb_node *parent, **p;
	__be32 call_id;

	write_lock_bh(&conn->lock);

	call_id = call->call_id;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		xcall = rb_entry(parent, struct rxrpc_call, conn_node);

		if (call_id < xcall->call_id)
			p = &(*p)->rb_left;
		else if (call_id > xcall->call_id)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);

	write_unlock_bh(&conn->lock);
}

/*
 * connect a call on an exclusive connection
 */
static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
				   struct rxrpc_transport *trans,
				   u16 service_id,
				   struct rxrpc_call *call,
				   gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int chan, ret;

	_enter("");

	conn = rx->conn;
	if (!conn) {
		/* not yet present - create a candidate for a new connection
		 * and then redo the check */
		conn = rxrpc_alloc_connection(gfp);
		if (!conn) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		conn->trans = trans;
		conn->bundle = NULL;
		conn->service_id = service_id;
		conn->epoch = rxrpc_epoch;
		conn->in_clientflag = 0;
		conn->out_clientflag = RXRPC_CLIENT_INITIATED;
		conn->cid = 0;
		conn->state = RXRPC_CONN_CLIENT;
		conn->avail_calls = RXRPC_MAXCALLS - 1;
		conn->security_level = rx->min_sec_level;
		conn->key = key_get(rx->key);

		ret = rxrpc_init_client_conn_security(conn);
		if (ret < 0) {
			key_put(conn->key);
			kfree(conn);
			_leave(" = %d [key]", ret);
			return ret;
		}

		write_lock_bh(&rxrpc_connection_lock);
		list_add_tail(&conn->link, &rxrpc_connections);
		write_unlock_bh(&rxrpc_connection_lock);

		spin_lock(&trans->client_lock);
		atomic_inc(&trans->usage);

		_net("CONNECT EXCL new %d on TRANS %d",
		     conn->debug_id, conn->trans->debug_id);

		rxrpc_assign_connection_id(conn);
		rx->conn = conn;
	} else {
		spin_lock(&trans->client_lock);
	}

	/* we've got a connection with a free channel and we can now attach the
	 * call to it
	 * - we're holding the transport's client lock
	 * - we're holding a reference on the connection
	 */
	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan])
			goto found_channel;
	goto no_free_channels;

found_channel:
	atomic_inc(&conn->usage);
	conn->channels[chan] = call;
	call->conn = conn;
	call->channel = chan;
	call->cid = conn->cid | chan;
	call->call_id = ++conn->call_counter;

	_net("CONNECT client on conn %d chan %d as call %x",
	     conn->debug_id, chan, call->call_id);

	spin_unlock(&trans->client_lock);

	rxrpc_add_call_ID_to_conn(conn, call);
	_leave(" = 0");
	return 0;

no_free_channels:
	spin_unlock(&trans->client_lock);
	_leave(" = -ENOSR");
	return -ENOSR;
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_transport *trans,
		       struct rxrpc_conn_bundle *bundle,
		       struct rxrpc_call *call,
		       gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate;
	int chan, ret;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%p,%lx,", rx, call->user_call_ID);

	if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags))
		return rxrpc_connect_exclusive(rx, trans, bundle->service_id,
					       call, gfp);

	spin_lock(&trans->client_lock);
	for (;;) {
		/* see if the bundle has a call slot available */
		if (!list_empty(&bundle->avail_conns)) {
			_debug("avail");
			conn = list_entry(bundle->avail_conns.next,
					  struct rxrpc_connection,
					  bundle_link);
			if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
				list_del_init(&conn->bundle_link);
				bundle->num_conns--;
				continue;
			}
			if (--conn->avail_calls == 0)
				list_move(&conn->bundle_link,
					  &bundle->busy_conns);
			ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			atomic_inc(&conn->usage);
			break;
		}

		if (!list_empty(&bundle->unused_conns)) {
			_debug("unused");
			conn = list_entry(bundle->unused_conns.next,
					  struct rxrpc_connection,
					  bundle_link);
			if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
				list_del_init(&conn->bundle_link);
				bundle->num_conns--;
				continue;
			}
			ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS);
			conn->avail_calls = RXRPC_MAXCALLS - 1;
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			atomic_inc(&conn->usage);
			list_move(&conn->bundle_link, &bundle->avail_conns);
			break;
		}

		/* need to allocate a new connection */
		_debug("get new conn [%d]", bundle->num_conns);

		spin_unlock(&trans->client_lock);

		if (signal_pending(current))
			goto interrupted;

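		/* cap the bundle at 20 connections; once the cap is reached
		 * we either bail out (for non-blocking allocations) or sleep
		 * on chanwait until a channel or connection slot frees up */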
		if (bundle->num_conns >= 20) {
			_debug("too many conns");

			if (!gfpflags_allow_blocking(gfp)) {
				_leave(" = -EAGAIN");
				return -EAGAIN;
			}

			add_wait_queue(&bundle->chanwait, &myself);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (bundle->num_conns < 20 ||
				    !list_empty(&bundle->unused_conns) ||
				    !list_empty(&bundle->avail_conns))
					break;
				if (signal_pending(current))
					goto interrupted_dequeue;
				schedule();
			}
			remove_wait_queue(&bundle->chanwait, &myself);
			__set_current_state(TASK_RUNNING);
			spin_lock(&trans->client_lock);
			continue;
		}

		/* not yet present - create a candidate for a new connection and then
		 * redo the check */
		candidate = rxrpc_alloc_connection(gfp);
		if (!candidate) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		candidate->trans = trans;
		candidate->bundle = bundle;
		candidate->service_id = bundle->service_id;
		candidate->epoch = rxrpc_epoch;
		candidate->in_clientflag = 0;
		candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
		candidate->cid = 0;
		candidate->state = RXRPC_CONN_CLIENT;
		candidate->avail_calls = RXRPC_MAXCALLS;
		candidate->security_level = rx->min_sec_level;
		candidate->key = key_get(bundle->key);

		ret = rxrpc_init_client_conn_security(candidate);
		if (ret < 0) {
			key_put(candidate->key);
			kfree(candidate);
			_leave(" = %d [key]", ret);
			return ret;
		}

		write_lock_bh(&rxrpc_connection_lock);
		list_add_tail(&candidate->link, &rxrpc_connections);
		write_unlock_bh(&rxrpc_connection_lock);

		spin_lock(&trans->client_lock);

		list_add(&candidate->bundle_link, &bundle->unused_conns);
		bundle->num_conns++;
		atomic_inc(&bundle->usage);
		atomic_inc(&trans->usage);

		_net("CONNECT new %d on TRANS %d",
		     candidate->debug_id, candidate->trans->debug_id);

		rxrpc_assign_connection_id(candidate);
		if (candidate->security)
			candidate->security->prime_packet_security(candidate);

		/* leave the candidate lurking in zombie mode attached to the
		 * bundle until we're ready for it */
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	/* we've got a connection with a free channel and we can now attach the
	 * call to it
	 * - we're holding the transport's client lock
	 * - we're holding a reference on the connection
	 * - we're holding a reference on the bundle
	 */
	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan])
			goto found_channel;
	ASSERT(conn->channels[0] == NULL ||
	       conn->channels[1] == NULL ||
	       conn->channels[2] == NULL ||
	       conn->channels[3] == NULL);
	BUG();

found_channel:
	conn->channels[chan] = call;
	call->conn = conn;
	call->channel = chan;
	call->cid = conn->cid | chan;
	call->call_id = ++conn->call_counter;

	_net("CONNECT client on conn %d chan %d as call %x",
	     conn->debug_id, chan, call->call_id);

	ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
	spin_unlock(&trans->client_lock);

	rxrpc_add_call_ID_to_conn(conn, call);

	_leave(" = 0");
	return 0;

interrupted_dequeue:
	remove_wait_queue(&bundle->chanwait, &myself);
	__set_current_state(TASK_RUNNING);
interrupted:
	_leave(" = -ERESTARTSYS");
	return -ERESTARTSYS;
}

/*
 * get a record of an incoming connection
 */
struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *trans,
			  struct rxrpc_host_header *hdr,
			  gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rb_node *p, **pp;
	const char *new = "old";
	__be32 epoch;
	u32 cid;

	_enter("");

	ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);

	epoch = hdr->epoch;
	cid = hdr->cid & RXRPC_CIDMASK;

	/* search the connection list first */
	read_lock_bh(&trans->conn_lock);

	p = trans->server_conns.rb_node;
	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, node);

		_debug("maybe %x", conn->cid);

		if (epoch < conn->epoch)
			p = p->rb_left;
		else if (epoch > conn->epoch)
			p = p->rb_right;
		else if (cid < conn->cid)
			p = p->rb_left;
		else if (cid > conn->cid)
			p = p->rb_right;
		else
			goto found_extant_connection;
	}
	read_unlock_bh(&trans->conn_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_connection(gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->trans = trans;
	candidate->epoch = hdr->epoch;
	candidate->cid = hdr->cid & RXRPC_CIDMASK;
	candidate->service_id = hdr->serviceId;
	candidate->security_ix = hdr->securityIndex;
	candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->out_clientflag = 0;
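	/* connections bound to a real service start out unsecured until the
	 * security negotiation for their security index has completed */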
	candidate->state = RXRPC_CONN_SERVER;
	if (candidate->service_id)
		candidate->state = RXRPC_CONN_SERVER_UNSECURED;

	write_lock_bh(&trans->conn_lock);

	pp = &trans->server_conns.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		conn = rb_entry(p, struct rxrpc_connection, node);

		if (epoch < conn->epoch)
			pp = &(*pp)->rb_left;
		else if (epoch > conn->epoch)
			pp = &(*pp)->rb_right;
		else if (cid < conn->cid)
			pp = &(*pp)->rb_left;
		else if (cid > conn->cid)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	conn = candidate;
	candidate = NULL;
	rb_link_node(&conn->node, p, pp);
	rb_insert_color(&conn->node, &trans->server_conns);
	atomic_inc(&conn->trans->usage);

	write_unlock_bh(&trans->conn_lock);

	write_lock_bh(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock_bh(&rxrpc_connection_lock);

	new = "new";

success:
	_net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->cid);

	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return conn;

	/* we found the connection in the list immediately */
found_extant_connection:
	if (hdr->securityIndex != conn->security_ix) {
		read_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	read_unlock_bh(&trans->conn_lock);
	goto success;

	/* we found the connection on the second time through the list */
found_extant_second:
	if (hdr->securityIndex != conn->security_ix) {
		write_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	write_unlock_bh(&trans->conn_lock);
	kfree(candidate);
	goto success;

security_mismatch:
	kfree(candidate);
	_leave(" = -EKEYREJECTED");
	return ERR_PTR(-EKEYREJECTED);
}

/*
 * find a connection based on transport and RxRPC connection ID for an incoming
 * packet
 */
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
					       struct rxrpc_host_header *hdr)
{
	struct rxrpc_connection *conn;
	struct rb_node *p;
	u32 epoch, cid;

	_enter(",{%x,%x}", hdr->cid, hdr->flags);

	read_lock_bh(&trans->conn_lock);

	cid = hdr->cid & RXRPC_CIDMASK;
	epoch = hdr->epoch;

	if (hdr->flags & RXRPC_CLIENT_INITIATED)
		p = trans->server_conns.rb_node;
	else
		p = trans->client_conns.rb_node;

	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, node);

		_debug("maybe %x", conn->cid);

		if (epoch < conn->epoch)
			p = p->rb_left;
		else if (epoch > conn->epoch)
			p = p->rb_right;
		else if (cid < conn->cid)
			p = p->rb_left;
		else if (cid > conn->cid)
			p = p->rb_right;
		else
			goto found;
	}

	read_unlock_bh(&trans->conn_lock);
	_leave(" = NULL");
	return NULL;

found:
	atomic_inc(&conn->usage);
	read_unlock_bh(&trans->conn_lock);
	_leave(" = %p", conn);
	return conn;
}

/*
 * release a virtual connection
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	_enter("%p{u=%d,d=%d}",
	       conn, atomic_read(&conn->usage), conn->debug_id);

	ASSERTCMP(atomic_read(&conn->usage), >, 0);

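	/* note the time of last use before dropping the ref so that the
	 * reaper can tell when the expiry period has elapsed */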
	conn->put_time = ktime_get_seconds();
	if (atomic_dec_and_test(&conn->usage)) {
		_debug("zombie");
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	}

	_leave("");
}

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
{
	_enter("%p{%d}", conn, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	if (conn->bundle)
		rxrpc_put_bundle(conn->trans, conn->bundle);

	ASSERT(RB_EMPTY_ROOT(&conn->calls));
	rxrpc_purge_queue(&conn->rx_queue);

	rxrpc_clear_conn_security(conn);
	rxrpc_put_transport(conn->trans);
	kfree(conn);
	_leave("");
}

/*
 * reap dead connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = ktime_get_seconds();
	earliest = ULONG_MAX;

	write_lock_bh(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long) now - (long) conn->put_time);

		if (likely(atomic_read(&conn->usage) > 0))
			continue;

		spin_lock(&conn->trans->client_lock);
		write_lock(&conn->trans->conn_lock);
		reap_time = conn->put_time + rxrpc_connection_expiry;

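		/* recheck the usage count now that the transport locks are
		 * held - the connection may have been reused since the
		 * unlocked check above */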
		if (atomic_read(&conn->usage) > 0) {
			;
		} else if (reap_time <= now) {
			list_move_tail(&conn->link, &graveyard);
			if (conn->out_clientflag)
				rb_erase(&conn->node,
					 &conn->trans->client_conns);
			else
				rb_erase(&conn->node,
					 &conn->trans->server_conns);
			if (conn->bundle) {
				list_del_init(&conn->bundle_link);
				conn->bundle->num_conns--;
			}

		} else if (reap_time < earliest) {
			earliest = reap_time;
		}

		write_unlock(&conn->trans->conn_lock);
		spin_unlock(&conn->trans->client_lock);
	}
	write_unlock_bh(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
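		/* the expiry times are held in seconds, so scale by HZ to
		 * get the delay in jiffies for the workqueue */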
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_destroy_connection(conn);
	}

	_leave("");
}

/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
	_enter("");

	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);

	_leave("");
}