v6.2
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/* Client connection-specific management code.
  3 *
  4 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
  5 * Written by David Howells (dhowells@redhat.com)
  6 *
  7 * Client connections need to be cached for a little while after they've made a
  8 * call so as to handle retransmitted DATA packets in case the server didn't
  9 * receive the final ACK or terminating ABORT we sent it.
 10 *
 11 * There are flags of relevance to the cache:
 12 *
  13 *  (1) DONT_REUSE - The connection should be discarded as soon as possible and
 14 *      should not be reused.  This is set when an exclusive connection is used
 15 *      or a call ID counter overflows.
 16 *
 17 * The caching state may only be changed if the cache lock is held.
 18 *
 19 * There are two idle client connection expiry durations.  If the total number
 20 * of connections is below the reap threshold, we use the normal duration; if
 21 * it's above, we use the fast duration.
 22 */
 23
 24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 25
 26#include <linux/slab.h>
 27#include <linux/idr.h>
 28#include <linux/timer.h>
 29#include <linux/sched/signal.h>
 30
 31#include "ar-internal.h"
 32
 33__read_mostly unsigned int rxrpc_reap_client_connections = 900;
 34__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
 35__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
 36
 37static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle)
 38{
 39	atomic_inc(&bundle->active);
 40}
 41
 42/*
 43 * Release a connection ID for a client connection.
 44 */
 45static void rxrpc_put_client_connection_id(struct rxrpc_local *local,
 46					   struct rxrpc_connection *conn)
 47{
 48	idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT);
 49}
 50
 51/*
 52 * Destroy the client connection ID tree.
 53 */
 54static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local)
 55{
 56	struct rxrpc_connection *conn;
 57	int id;
 58
 59	if (!idr_is_empty(&local->conn_ids)) {
 60		idr_for_each_entry(&local->conn_ids, conn, id) {
 61			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
 62			       conn, refcount_read(&conn->ref));
 63		}
 64		BUG();
 65	}
 66
 67	idr_destroy(&local->conn_ids);
 68}
 69
 70/*
 71 * Allocate a connection bundle.
 72 */
 73static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
 74					       gfp_t gfp)
 75{
 76	struct rxrpc_bundle *bundle;
 77
 78	bundle = kzalloc(sizeof(*bundle), gfp);
 79	if (bundle) {
 80		bundle->local		= call->local;
 81		bundle->peer		= rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle);
 82		bundle->key		= key_get(call->key);
 83		bundle->security	= call->security;
 84		bundle->exclusive	= test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
 85		bundle->upgrade		= test_bit(RXRPC_CALL_UPGRADE, &call->flags);
 86		bundle->service_id	= call->dest_srx.srx_service;
 87		bundle->security_level	= call->security_level;
 88		refcount_set(&bundle->ref, 1);
 89		atomic_set(&bundle->active, 1);
 90		INIT_LIST_HEAD(&bundle->waiting_calls);
 91		trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new);
 92	}
 93	return bundle;
 94}
 95
 96struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle,
 97				      enum rxrpc_bundle_trace why)
 98{
 99	int r;
100
101	__refcount_inc(&bundle->ref, &r);
102	trace_rxrpc_bundle(bundle->debug_id, r + 1, why);
103	return bundle;
104}
105
106static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
107{
108	trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_free);
109	rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
110	key_put(bundle->key);
111	kfree(bundle);
112}
113
114void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why)
115{
116	unsigned int id;
117	bool dead;
118	int r;
119
120	if (bundle) {
121		id = bundle->debug_id;
122		dead = __refcount_dec_and_test(&bundle->ref, &r);
123		trace_rxrpc_bundle(id, r - 1, why);
124		if (dead)
125			rxrpc_free_bundle(bundle);
126	}
127}
128
129/*
130 * Get rid of outstanding client connection preallocations when a local
131 * endpoint is destroyed.
132 */
133void rxrpc_purge_client_connections(struct rxrpc_local *local)
134{
135	rxrpc_destroy_client_conn_ids(local);
136}
137
138/*
139 * Allocate a client connection.
140 */
141static struct rxrpc_connection *
142rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle)
143{
144	struct rxrpc_connection *conn;
145	struct rxrpc_local *local = bundle->local;
146	struct rxrpc_net *rxnet = local->rxnet;
147	int id;
148
149	_enter("");
150
151	conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN);
152	if (!conn)
153		return ERR_PTR(-ENOMEM);
154
155	id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000,
156			      GFP_ATOMIC | __GFP_NOWARN);
157	if (id < 0) {
158		kfree(conn);
159		return ERR_PTR(id);
160	}
161
162	refcount_set(&conn->ref, 1);
163	conn->proto.cid		= id << RXRPC_CIDSHIFT;
164	conn->proto.epoch	= local->rxnet->epoch;
165	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
166	conn->bundle		= rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
167	conn->local		= rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn);
168	conn->peer		= rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn);
169	conn->key		= key_get(bundle->key);
170	conn->security		= bundle->security;
171	conn->exclusive		= bundle->exclusive;
172	conn->upgrade		= bundle->upgrade;
173	conn->orig_service_id	= bundle->service_id;
174	conn->security_level	= bundle->security_level;
175	conn->state		= RXRPC_CONN_CLIENT_UNSECURED;
176	conn->service_id	= conn->orig_service_id;
177
178	if (conn->security == &rxrpc_no_security)
179		conn->state	= RXRPC_CONN_CLIENT;
180
181	atomic_inc(&rxnet->nr_conns);
182	write_lock(&rxnet->conn_lock);
183	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
184	write_unlock(&rxnet->conn_lock);
185
186	rxrpc_see_connection(conn, rxrpc_conn_new_client);
187
188	atomic_inc(&rxnet->nr_client_conns);
189	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
190	return conn;
191}
192
193/*
194 * Determine if a connection may be reused.
195 */
196static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
197{
198	struct rxrpc_net *rxnet;
199	int id_cursor, id, distance, limit;
200
201	if (!conn)
202		goto dont_reuse;
203
204	rxnet = conn->rxnet;
205	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
206		goto dont_reuse;
207
208	if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED &&
209	     conn->state != RXRPC_CONN_CLIENT) ||
210	    conn->proto.epoch != rxnet->epoch)
211		goto mark_dont_reuse;
212
213	/* The IDR tree gets very expensive on memory if the connection IDs are
214	 * widely scattered throughout the number space, so we shall want to
215	 * kill off connections that, say, have an ID more than about four
216	 * times the maximum number of client conns away from the current
217	 * allocation point to try and keep the IDs concentrated.
218	 */
219	id_cursor = idr_get_cursor(&conn->local->conn_ids);
220	id = conn->proto.cid >> RXRPC_CIDSHIFT;
221	distance = id - id_cursor;
222	if (distance < 0)
223		distance = -distance;
224	limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
225	if (distance > limit)
226		goto mark_dont_reuse;
227
228	return true;
229
230mark_dont_reuse:
231	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
232dont_reuse:
233	return false;
234}
235
236/*
237 * Look up the conn bundle that matches the connection parameters, adding it if
238 * it doesn't yet exist.
239 */
240int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
241{
242	static atomic_t rxrpc_bundle_id;
243	struct rxrpc_bundle *bundle, *candidate;
244	struct rxrpc_local *local = call->local;
245	struct rb_node *p, **pp, *parent;
246	long diff;
247	bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);
248
249	_enter("{%px,%x,%u,%u}",
250	       call->peer, key_serial(call->key), call->security_level,
251	       upgrade);
252
253	if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) {
254		call->bundle = rxrpc_alloc_bundle(call, gfp);
255		return call->bundle ? 0 : -ENOMEM;
256	}
257
258	/* First, see if the bundle is already there. */
259	_debug("search 1");
260	spin_lock(&local->client_bundles_lock);
261	p = local->client_bundles.rb_node;
262	while (p) {
263		bundle = rb_entry(p, struct rxrpc_bundle, local_node);
264
265#define cmp(X, Y) ((long)(X) - (long)(Y))
266		diff = (cmp(bundle->peer, call->peer) ?:
267			cmp(bundle->key, call->key) ?:
268			cmp(bundle->security_level, call->security_level) ?:
269			cmp(bundle->upgrade, upgrade));
270#undef cmp
271		if (diff < 0)
272			p = p->rb_left;
273		else if (diff > 0)
274			p = p->rb_right;
275		else
276			goto found_bundle;
277	}
278	spin_unlock(&local->client_bundles_lock);
279	_debug("not found");
280
281	/* It wasn't.  We need to add one. */
282	candidate = rxrpc_alloc_bundle(call, gfp);
283	if (!candidate)
284		return -ENOMEM;
285
286	_debug("search 2");
287	spin_lock(&local->client_bundles_lock);
288	pp = &local->client_bundles.rb_node;
289	parent = NULL;
290	while (*pp) {
291		parent = *pp;
292		bundle = rb_entry(parent, struct rxrpc_bundle, local_node);
293
294#define cmp(X, Y) ((long)(X) - (long)(Y))
295		diff = (cmp(bundle->peer, call->peer) ?:
296			cmp(bundle->key, call->key) ?:
297			cmp(bundle->security_level, call->security_level) ?:
298			cmp(bundle->upgrade, upgrade));
299#undef cmp
300		if (diff < 0)
301			pp = &(*pp)->rb_left;
302		else if (diff > 0)
303			pp = &(*pp)->rb_right;
304		else
305			goto found_bundle_free;
306	}
307
308	_debug("new bundle");
309	candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id);
310	rb_link_node(&candidate->local_node, parent, pp);
311	rb_insert_color(&candidate->local_node, &local->client_bundles);
312	call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
313	spin_unlock(&local->client_bundles_lock);
314	_leave(" = B=%u [new]", call->bundle->debug_id);
315	return 0;
316
317found_bundle_free:
318	rxrpc_free_bundle(candidate);
319found_bundle:
320	call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
321	rxrpc_activate_bundle(bundle);
322	spin_unlock(&local->client_bundles_lock);
323	_leave(" = B=%u [found]", call->bundle->debug_id);
324	return 0;
325}
326
327/*
328 * Allocate a new connection and add it into a bundle.
329 */
330static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle,
331				     unsigned int slot)
332{
333	struct rxrpc_connection *conn, *old;
334	unsigned int shift = slot * RXRPC_MAXCALLS;
335	unsigned int i;
336
337	old = bundle->conns[slot];
338	if (old) {
339		bundle->conns[slot] = NULL;
340		trace_rxrpc_client(old, -1, rxrpc_client_replace);
341		rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
342	}
343
344	conn = rxrpc_alloc_client_connection(bundle);
345	if (IS_ERR(conn)) {
346		bundle->alloc_error = PTR_ERR(conn);
347		return false;
348	}
349
350	rxrpc_activate_bundle(bundle);
351	conn->bundle_shift = shift;
352	bundle->conns[slot] = conn;
353	for (i = 0; i < RXRPC_MAXCALLS; i++)
354		set_bit(shift + i, &bundle->avail_chans);
355	return true;
356}
357
358/*
359 * Add a connection to a bundle if there are no usable connections or we have
360 * connections waiting for extra capacity.
361 */
362static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle)
363{
364	int slot = -1, i, usable;
365
366	_enter("");
367
368	bundle->alloc_error = 0;
369
370	/* See if there are any usable connections. */
371	usable = 0;
372	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
373		if (rxrpc_may_reuse_conn(bundle->conns[i]))
374			usable++;
375		else if (slot == -1)
376			slot = i;
377	}
378
379	if (!usable && bundle->upgrade)
380		bundle->try_upgrade = true;
381
382	if (!usable)
383		goto alloc_conn;
384
385	if (!bundle->avail_chans &&
386	    !bundle->try_upgrade &&
387	    usable < ARRAY_SIZE(bundle->conns))
388		goto alloc_conn;
389
390	_leave("");
391	return usable;
392
393alloc_conn:
394	return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false;
395}
396
397/*
398 * Assign a channel to the call at the front of the queue and wake the call up.
399 * We don't increment the callNumber counter until this number has been exposed
400 * to the world.
401 */
402static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
403				       unsigned int channel)
404{
405	struct rxrpc_channel *chan = &conn->channels[channel];
406	struct rxrpc_bundle *bundle = conn->bundle;
407	struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
408					     struct rxrpc_call, wait_link);
409	u32 call_id = chan->call_counter + 1;
410
411	_enter("C=%x,%u", conn->debug_id, channel);
412
413	list_del_init(&call->wait_link);
414
415	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
416
417	/* Cancel the final ACK on the previous call if it hasn't been sent yet
418	 * as the DATA packet will implicitly ACK it.
419	 */
420	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
421	clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);
422
423	rxrpc_see_call(call, rxrpc_call_see_activate_client);
424	call->conn	= rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
425	call->cid	= conn->proto.cid | channel;
426	call->call_id	= call_id;
427	call->dest_srx.srx_service = conn->service_id;
428	call->cong_ssthresh = call->peer->cong_ssthresh;
429	if (call->cong_cwnd >= call->cong_ssthresh)
430		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
431	else
432		call->cong_mode = RXRPC_CALL_SLOW_START;
433
434	chan->call_id		= call_id;
435	chan->call_debug_id	= call->debug_id;
436	chan->call		= call;
437
438	rxrpc_see_call(call, rxrpc_call_see_connected);
439	trace_rxrpc_connect_call(call);
440	call->tx_last_sent = ktime_get_real();
441	rxrpc_start_call_timer(call);
442	rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST);
443	wake_up(&call->waitq);
444}
445
446/*
447 * Remove a connection from the idle list if it's on it.
448 */
449static void rxrpc_unidle_conn(struct rxrpc_connection *conn)
450{
451	if (!list_empty(&conn->cache_link)) {
452		list_del_init(&conn->cache_link);
453		rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
454	}
455}
456
457/*
458 * Assign channels and callNumbers to waiting calls.
459 */
460static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
461{
462	struct rxrpc_connection *conn;
463	unsigned long avail, mask;
464	unsigned int channel, slot;
465
466	trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);
467
468	if (bundle->try_upgrade)
469		mask = 1;
470	else
471		mask = ULONG_MAX;
472
473	while (!list_empty(&bundle->waiting_calls)) {
474		avail = bundle->avail_chans & mask;
475		if (!avail)
476			break;
477		channel = __ffs(avail);
478		clear_bit(channel, &bundle->avail_chans);
479
480		slot = channel / RXRPC_MAXCALLS;
481		conn = bundle->conns[slot];
482		if (!conn)
483			break;
484
485		if (bundle->try_upgrade)
486			set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
487		rxrpc_unidle_conn(conn);
488
489		channel &= (RXRPC_MAXCALLS - 1);
490		conn->act_chans	|= 1 << channel;
491		rxrpc_activate_one_channel(conn, channel);
492	}
493}
494
495/*
496 * Connect waiting channels (called from the I/O thread).
497 */
498void rxrpc_connect_client_calls(struct rxrpc_local *local)
499{
500	struct rxrpc_call *call;
501
502	while ((call = list_first_entry_or_null(&local->new_client_calls,
503						struct rxrpc_call, wait_link))
504	       ) {
505		struct rxrpc_bundle *bundle = call->bundle;
506
507		spin_lock(&local->client_call_lock);
508		list_move_tail(&call->wait_link, &bundle->waiting_calls);
509		spin_unlock(&local->client_call_lock);
510
511		if (rxrpc_bundle_has_space(bundle))
512			rxrpc_activate_channels(bundle);
513	}
514}
515
516/*
517 * Note that a call, and thus a connection, is about to be exposed to the
518 * world.
519 */
520void rxrpc_expose_client_call(struct rxrpc_call *call)
521{
522	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
523	struct rxrpc_connection *conn = call->conn;
524	struct rxrpc_channel *chan = &conn->channels[channel];
525
526	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
527		/* Mark the call ID as being used.  If the callNumber counter
528		 * exceeds ~2 billion, we kill the connection after its
529		 * outstanding calls have finished so that the counter doesn't
530		 * wrap.
531		 */
532		chan->call_counter++;
533		if (chan->call_counter >= INT_MAX)
534			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
535		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
536
537		spin_lock(&call->peer->lock);
538		hlist_add_head(&call->error_link, &call->peer->error_targets);
539		spin_unlock(&call->peer->lock);
540	}
541}
542
543/*
544 * Set the reap timer.
545 */
546static void rxrpc_set_client_reap_timer(struct rxrpc_local *local)
547{
548	if (!local->kill_all_client_conns) {
549		unsigned long now = jiffies;
550		unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
551
552		if (local->rxnet->live)
553			timer_reduce(&local->client_conn_reap_timer, reap_at);
554	}
555}
556
557/*
558 * Disconnect a client call.
559 */
560void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
561{
562	struct rxrpc_connection *conn;
563	struct rxrpc_channel *chan = NULL;
564	struct rxrpc_local *local = bundle->local;
565	unsigned int channel;
566	bool may_reuse;
567	u32 cid;
568
569	_enter("c=%x", call->debug_id);
570
571	/* Calls that have never actually been assigned a channel can simply be
572	 * discarded.
573	 */
574	conn = call->conn;
575	if (!conn) {
576		_debug("call is waiting");
577		ASSERTCMP(call->call_id, ==, 0);
578		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
579		list_del_init(&call->wait_link);
580		return;
581	}
582
583	cid = call->cid;
584	channel = cid & RXRPC_CHANNELMASK;
585	chan = &conn->channels[channel];
586	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
587
588	if (WARN_ON(chan->call != call))
589		return;
590
591	may_reuse = rxrpc_may_reuse_conn(conn);
592
593	/* If a client call was exposed to the world, we save the result for
594	 * retransmission.
595	 *
596	 * We use a barrier here so that the call number and abort code can be
597	 * read without needing to take a lock.
598	 *
599	 * TODO: Make the incoming packet handler check this and handle
600	 * terminal retransmission without requiring access to the call.
601	 */
602	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
603		_debug("exposed %u,%u", call->call_id, call->abort_code);
604		__rxrpc_disconnect_call(conn, call);
605
606		if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
607			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
608			bundle->try_upgrade = false;
609			if (may_reuse)
610				rxrpc_activate_channels(bundle);
611		}
612	}
613
614	/* See if we can pass the channel directly to another call. */
615	if (may_reuse && !list_empty(&bundle->waiting_calls)) {
616		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
617		rxrpc_activate_one_channel(conn, channel);
618		return;
619	}
620
621	/* Schedule the final ACK to be transmitted in a short while so that it
622	 * can be skipped if we find a follow-on call.  The first DATA packet
623	 * of the follow on call will implicitly ACK this call.
624	 */
625	if (call->completion == RXRPC_CALL_SUCCEEDED &&
626	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
627		unsigned long final_ack_at = jiffies + 2;
628
629		WRITE_ONCE(chan->final_ack_at, final_ack_at);
630		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
631		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
632		rxrpc_reduce_conn_timer(conn, final_ack_at);
633	}
634
635	/* Deactivate the channel. */
636	chan->call = NULL;
637	set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
638	conn->act_chans	&= ~(1 << channel);
639
640	/* If no channels remain active, then put the connection on the idle
641	 * list for a short while.  Give it a ref to stop it going away if it
642	 * becomes unbundled.
643	 */
644	if (!conn->act_chans) {
645		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
646		conn->idle_timestamp = jiffies;
647
648		rxrpc_get_connection(conn, rxrpc_conn_get_idle);
649		list_move_tail(&conn->cache_link, &local->idle_client_conns);
650
651		rxrpc_set_client_reap_timer(local);
652	}
653}
654
655/*
656 * Remove a connection from a bundle.
657 */
658static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
659{
660	struct rxrpc_bundle *bundle = conn->bundle;
661	unsigned int bindex;
662	int i;
663
664	_enter("C=%x", conn->debug_id);
665
666	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
667		rxrpc_process_delayed_final_acks(conn, true);
668
669	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
670	if (bundle->conns[bindex] == conn) {
671		_debug("clear slot %u", bindex);
672		bundle->conns[bindex] = NULL;
673		for (i = 0; i < RXRPC_MAXCALLS; i++)
674			clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
675		rxrpc_put_client_connection_id(bundle->local, conn);
676		rxrpc_deactivate_bundle(bundle);
677		rxrpc_put_connection(conn, rxrpc_conn_put_unbundle);
678	}
679}
680
681/*
682 * Drop the active count on a bundle.
683 */
684void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
685{
686	struct rxrpc_local *local;
687	bool need_put = false;
688
689	if (!bundle)
690		return;
691
692	local = bundle->local;
693	if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
694		if (!bundle->exclusive) {
695			_debug("erase bundle");
696			rb_erase(&bundle->local_node, &local->client_bundles);
697			need_put = true;
698		}
699
700		spin_unlock(&local->client_bundles_lock);
701		if (need_put)
702			rxrpc_put_bundle(bundle, rxrpc_bundle_put_discard);
703	}
704}
705
706/*
707 * Clean up a dead client connection.
708 */
709void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
710{
711	struct rxrpc_local *local = conn->local;
712	struct rxrpc_net *rxnet = local->rxnet;
713
714	_enter("C=%x", conn->debug_id);
715
716	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
717	atomic_dec(&rxnet->nr_client_conns);
718
719	rxrpc_put_client_connection_id(local, conn);
720}
721
722/*
723 * Discard expired client connections from the idle list.  Each conn in the
724 * idle list has been exposed and holds an extra ref because of that.
725 *
726 * This may be called from conn setup or from a work item so cannot be
727 * considered non-reentrant.
728 */
729void rxrpc_discard_expired_client_conns(struct rxrpc_local *local)
730{
731	struct rxrpc_connection *conn;
732	unsigned long expiry, conn_expires_at, now;
733	unsigned int nr_conns;
734
735	_enter("");
736
737	/* We keep an estimate of what the number of conns ought to be after
738	 * we've discarded some so that we don't overdo the discarding.
739	 */
740	nr_conns = atomic_read(&local->rxnet->nr_client_conns);
741
742next:
743	conn = list_first_entry_or_null(&local->idle_client_conns,
744					struct rxrpc_connection, cache_link);
745	if (!conn)
746		return;
747
748	if (!local->kill_all_client_conns) {
749		/* If the number of connections is over the reap limit, we
750		 * expedite discard by reducing the expiry timeout.  We must,
751		 * however, have at least a short grace period to be able to do
752		 * final-ACK or ABORT retransmission.
753		 */
754		expiry = rxrpc_conn_idle_client_expiry;
755		if (nr_conns > rxrpc_reap_client_connections)
756			expiry = rxrpc_conn_idle_client_fast_expiry;
757		if (conn->local->service_closed)
758			expiry = rxrpc_closed_conn_expiry * HZ;
759
760		conn_expires_at = conn->idle_timestamp + expiry;
761
762		now = READ_ONCE(jiffies);
763		if (time_after(conn_expires_at, now))
764			goto not_yet_expired;
765	}
766
767	atomic_dec(&conn->active);
768	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
769	list_del_init(&conn->cache_link);
770
771	rxrpc_unbundle_conn(conn);
772	/* Drop the ->cache_link ref */
773	rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle);
774
775	nr_conns--;
776	goto next;
777
778not_yet_expired:
779	/* The connection at the front of the queue hasn't yet expired, so
780	 * schedule the work item for that point if we discarded something.
781	 *
782	 * We don't worry if the work item is already scheduled - it can look
783	 * after rescheduling itself at a later time.  We could cancel it, but
784	 * then things get messier.
785	 */
786	_debug("not yet");
787	if (!local->kill_all_client_conns)
788		timer_reduce(&local->client_conn_reap_timer, conn_expires_at);
789
790	_leave("");
791}
792
793/*
794 * Clean up the client connections on a local endpoint.
795 */
796void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
797{
798	struct rxrpc_connection *conn;
799
800	_enter("");
801
802	local->kill_all_client_conns = true;
803
804	del_timer_sync(&local->client_conn_reap_timer);
805
806	while ((conn = list_first_entry_or_null(&local->idle_client_conns,
807						struct rxrpc_connection, cache_link))) {
808		list_del_init(&conn->cache_link);
809		atomic_dec(&conn->active);
810		trace_rxrpc_client(conn, -1, rxrpc_client_discard);
811		rxrpc_unbundle_conn(conn);
812		rxrpc_put_connection(conn, rxrpc_conn_put_local_dead);
813	}
814
815	_leave(" [culled]");
816}
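
A minimal userspace sketch (illustrative names, not kernel API) of two mechanisms in the listing above: the cyclic connection-ID allocation done with idr_alloc_cyclic() in rxrpc_alloc_client_connection(), and the distance-from-cursor heuristic that rxrpc_may_reuse_conn() uses to keep IDs concentrated:

#include <stdio.h>

static unsigned int cursor = 1;			/* next ID to hand out */

#define ID_MIN   1
#define ID_LIMIT 0x40000000			/* same ceiling as the idr_alloc_cyclic() call above */

/* Hand out IDs cyclically, wrapping back to ID_MIN at the ceiling. */
static unsigned int alloc_cyclic_id(void)
{
	unsigned int id = cursor;

	cursor = (cursor + 1 >= ID_LIMIT) ? ID_MIN : cursor + 1;
	return id;
}

/* Mirror the reuse test: reject IDs too far from the allocation cursor. */
static int may_reuse_id(unsigned int id, unsigned int nr_conns)
{
	long distance = (long)id - (long)cursor;
	long limit = nr_conns * 4 > 1024 ? nr_conns * 4 : 1024;

	if (distance < 0)
		distance = -distance;
	return distance <= limit;
}

int main(void)
{
	unsigned int old_id = alloc_cyclic_id();
	unsigned int i;

	for (i = 0; i < 5000; i++)		/* advance the cursor well past old_id */
		alloc_cyclic_id();

	printf("id %u reusable: %s\n", old_id,
	       may_reuse_id(old_id, 256) ? "yes" : "no");
	return 0;
}

Compiled as plain C, this prints "no" for the stale ID once the cursor has moved more than max(nr_conns * 4, 1024) away, mirroring the mark_dont_reuse path above.
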
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/* Client connection-specific management code.
  3 *
  4 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
  5 * Written by David Howells (dhowells@redhat.com)
  6 *
  7 * Client connections need to be cached for a little while after they've made a
  8 * call so as to handle retransmitted DATA packets in case the server didn't
  9 * receive the final ACK or terminating ABORT we sent it.
 10 *
 11 * There are flags of relevance to the cache:
 12 *
  13 *  (1) DONT_REUSE - The connection should be discarded as soon as possible and
 14 *      should not be reused.  This is set when an exclusive connection is used
 15 *      or a call ID counter overflows.
 16 *
 17 * The caching state may only be changed if the cache lock is held.
 18 *
 19 * There are two idle client connection expiry durations.  If the total number
 20 * of connections is below the reap threshold, we use the normal duration; if
 21 * it's above, we use the fast duration.
 22 */
 23
 24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 25
 26#include <linux/slab.h>
 27#include <linux/idr.h>
 28#include <linux/timer.h>
 29#include <linux/sched/signal.h>
 30
 31#include "ar-internal.h"
 32
 33__read_mostly unsigned int rxrpc_reap_client_connections = 900;
 34__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
 35__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
 36
 37static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle)
 38{
 39	atomic_inc(&bundle->active);
 40}
 41
 42/*
 43 * Release a connection ID for a client connection.
 44 */
 45static void rxrpc_put_client_connection_id(struct rxrpc_local *local,
 46					   struct rxrpc_connection *conn)
 47{
 48	idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT);
 49}
 50
 51/*
 52 * Destroy the client connection ID tree.
 53 */
 54static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local)
 55{
 56	struct rxrpc_connection *conn;
 57	int id;
 58
 59	if (!idr_is_empty(&local->conn_ids)) {
 60		idr_for_each_entry(&local->conn_ids, conn, id) {
 61			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
 62			       conn, refcount_read(&conn->ref));
 63		}
 64		BUG();
 65	}
 66
 67	idr_destroy(&local->conn_ids);
 68}
 69
 70/*
 71 * Allocate a connection bundle.
 72 */
 73static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
 74					       gfp_t gfp)
 75{
 76	static atomic_t rxrpc_bundle_id;
 77	struct rxrpc_bundle *bundle;
 78
 79	bundle = kzalloc(sizeof(*bundle), gfp);
 80	if (bundle) {
 81		bundle->local		= call->local;
 82		bundle->peer		= rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle);
 83		bundle->key		= key_get(call->key);
 84		bundle->security	= call->security;
 85		bundle->exclusive	= test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
 86		bundle->upgrade		= test_bit(RXRPC_CALL_UPGRADE, &call->flags);
 87		bundle->service_id	= call->dest_srx.srx_service;
 88		bundle->security_level	= call->security_level;
 89		bundle->debug_id	= atomic_inc_return(&rxrpc_bundle_id);
 90		refcount_set(&bundle->ref, 1);
 91		atomic_set(&bundle->active, 1);
 92		INIT_LIST_HEAD(&bundle->waiting_calls);
 93		trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new);
 94
 95		write_lock(&bundle->local->rxnet->conn_lock);
 96		list_add_tail(&bundle->proc_link, &bundle->local->rxnet->bundle_proc_list);
 97		write_unlock(&bundle->local->rxnet->conn_lock);
 98	}
 99	return bundle;
100}
101
102struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle,
103				      enum rxrpc_bundle_trace why)
104{
105	int r;
106
107	__refcount_inc(&bundle->ref, &r);
108	trace_rxrpc_bundle(bundle->debug_id, r + 1, why);
109	return bundle;
110}
111
112static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
113{
114	trace_rxrpc_bundle(bundle->debug_id, refcount_read(&bundle->ref),
115			   rxrpc_bundle_free);
116	write_lock(&bundle->local->rxnet->conn_lock);
117	list_del(&bundle->proc_link);
118	write_unlock(&bundle->local->rxnet->conn_lock);
119	rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
120	key_put(bundle->key);
121	kfree(bundle);
122}
123
124void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why)
125{
126	unsigned int id;
127	bool dead;
128	int r;
129
130	if (bundle) {
131		id = bundle->debug_id;
132		dead = __refcount_dec_and_test(&bundle->ref, &r);
133		trace_rxrpc_bundle(id, r - 1, why);
134		if (dead)
135			rxrpc_free_bundle(bundle);
136	}
137}
138
139/*
140 * Get rid of outstanding client connection preallocations when a local
141 * endpoint is destroyed.
142 */
143void rxrpc_purge_client_connections(struct rxrpc_local *local)
144{
145	rxrpc_destroy_client_conn_ids(local);
146}
147
148/*
149 * Allocate a client connection.
150 */
151static struct rxrpc_connection *
152rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle)
153{
154	struct rxrpc_connection *conn;
155	struct rxrpc_local *local = bundle->local;
156	struct rxrpc_net *rxnet = local->rxnet;
157	int id;
158
159	_enter("");
160
161	conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN);
162	if (!conn)
163		return ERR_PTR(-ENOMEM);
164
165	id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000,
166			      GFP_ATOMIC | __GFP_NOWARN);
167	if (id < 0) {
168		kfree(conn);
169		return ERR_PTR(id);
170	}
171
172	refcount_set(&conn->ref, 1);
173	conn->proto.cid		= id << RXRPC_CIDSHIFT;
174	conn->proto.epoch	= local->rxnet->epoch;
175	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
176	conn->bundle		= rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
177	conn->local		= rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn);
178	conn->peer		= rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn);
179	conn->key		= key_get(bundle->key);
180	conn->security		= bundle->security;
181	conn->exclusive		= bundle->exclusive;
182	conn->upgrade		= bundle->upgrade;
183	conn->orig_service_id	= bundle->service_id;
184	conn->security_level	= bundle->security_level;
185	conn->state		= RXRPC_CONN_CLIENT_UNSECURED;
186	conn->service_id	= conn->orig_service_id;
187
188	if (conn->security == &rxrpc_no_security)
189		conn->state	= RXRPC_CONN_CLIENT;
190
191	atomic_inc(&rxnet->nr_conns);
192	write_lock(&rxnet->conn_lock);
193	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
194	write_unlock(&rxnet->conn_lock);
195
196	rxrpc_see_connection(conn, rxrpc_conn_new_client);
197
198	atomic_inc(&rxnet->nr_client_conns);
199	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
200	return conn;
201}
202
203/*
204 * Determine if a connection may be reused.
205 */
206static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
207{
208	struct rxrpc_net *rxnet;
209	int id_cursor, id, distance, limit;
210
211	if (!conn)
212		goto dont_reuse;
213
214	rxnet = conn->rxnet;
215	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
216		goto dont_reuse;
217
218	if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED &&
219	     conn->state != RXRPC_CONN_CLIENT) ||
220	    conn->proto.epoch != rxnet->epoch)
221		goto mark_dont_reuse;
222
223	/* The IDR tree gets very expensive on memory if the connection IDs are
224	 * widely scattered throughout the number space, so we shall want to
225	 * kill off connections that, say, have an ID more than about four
226	 * times the maximum number of client conns away from the current
227	 * allocation point to try and keep the IDs concentrated.
228	 */
229	id_cursor = idr_get_cursor(&conn->local->conn_ids);
230	id = conn->proto.cid >> RXRPC_CIDSHIFT;
231	distance = id - id_cursor;
232	if (distance < 0)
233		distance = -distance;
234	limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
235	if (distance > limit)
236		goto mark_dont_reuse;
237
238	return true;
239
240mark_dont_reuse:
241	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
242dont_reuse:
243	return false;
244}
245
246/*
247 * Look up the conn bundle that matches the connection parameters, adding it if
248 * it doesn't yet exist.
249 */
250int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
251{
252	struct rxrpc_bundle *bundle, *candidate;
253	struct rxrpc_local *local = call->local;
254	struct rb_node *p, **pp, *parent;
255	long diff;
256	bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);
257
258	_enter("{%px,%x,%u,%u}",
259	       call->peer, key_serial(call->key), call->security_level,
260	       upgrade);
261
262	if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) {
263		call->bundle = rxrpc_alloc_bundle(call, gfp);
264		return call->bundle ? 0 : -ENOMEM;
265	}
266
267	/* First, see if the bundle is already there. */
268	_debug("search 1");
269	spin_lock(&local->client_bundles_lock);
270	p = local->client_bundles.rb_node;
271	while (p) {
272		bundle = rb_entry(p, struct rxrpc_bundle, local_node);
273
274#define cmp(X, Y) ((long)(X) - (long)(Y))
275		diff = (cmp(bundle->peer, call->peer) ?:
276			cmp(bundle->key, call->key) ?:
277			cmp(bundle->security_level, call->security_level) ?:
278			cmp(bundle->upgrade, upgrade));
279#undef cmp
280		if (diff < 0)
281			p = p->rb_left;
282		else if (diff > 0)
283			p = p->rb_right;
284		else
285			goto found_bundle;
286	}
287	spin_unlock(&local->client_bundles_lock);
288	_debug("not found");
289
290	/* It wasn't.  We need to add one. */
291	candidate = rxrpc_alloc_bundle(call, gfp);
292	if (!candidate)
293		return -ENOMEM;
294
295	_debug("search 2");
296	spin_lock(&local->client_bundles_lock);
297	pp = &local->client_bundles.rb_node;
298	parent = NULL;
299	while (*pp) {
300		parent = *pp;
301		bundle = rb_entry(parent, struct rxrpc_bundle, local_node);
302
303#define cmp(X, Y) ((long)(X) - (long)(Y))
304		diff = (cmp(bundle->peer, call->peer) ?:
305			cmp(bundle->key, call->key) ?:
306			cmp(bundle->security_level, call->security_level) ?:
307			cmp(bundle->upgrade, upgrade));
308#undef cmp
309		if (diff < 0)
310			pp = &(*pp)->rb_left;
311		else if (diff > 0)
312			pp = &(*pp)->rb_right;
313		else
314			goto found_bundle_free;
315	}
316
317	_debug("new bundle");
318	rb_link_node(&candidate->local_node, parent, pp);
319	rb_insert_color(&candidate->local_node, &local->client_bundles);
320	call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
321	spin_unlock(&local->client_bundles_lock);
322	_leave(" = B=%u [new]", call->bundle->debug_id);
323	return 0;
324
325found_bundle_free:
326	rxrpc_free_bundle(candidate);
327found_bundle:
328	call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
329	rxrpc_activate_bundle(bundle);
330	spin_unlock(&local->client_bundles_lock);
331	_leave(" = B=%u [found]", call->bundle->debug_id);
332	return 0;
333}
334
335/*
336 * Allocate a new connection and add it into a bundle.
337 */
338static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle,
339				     unsigned int slot)
340{
341	struct rxrpc_connection *conn, *old;
342	unsigned int shift = slot * RXRPC_MAXCALLS;
343	unsigned int i;
344
345	old = bundle->conns[slot];
346	if (old) {
347		bundle->conns[slot] = NULL;
348		bundle->conn_ids[slot] = 0;
349		trace_rxrpc_client(old, -1, rxrpc_client_replace);
350		rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
351	}
352
353	conn = rxrpc_alloc_client_connection(bundle);
354	if (IS_ERR(conn)) {
355		bundle->alloc_error = PTR_ERR(conn);
356		return false;
357	}
358
359	rxrpc_activate_bundle(bundle);
360	conn->bundle_shift = shift;
361	bundle->conns[slot] = conn;
362	bundle->conn_ids[slot] = conn->debug_id;
363	for (i = 0; i < RXRPC_MAXCALLS; i++)
364		set_bit(shift + i, &bundle->avail_chans);
365	return true;
366}
367
368/*
369 * Add a connection to a bundle if there are no usable connections or we have
370 * connections waiting for extra capacity.
371 */
372static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle)
373{
374	int slot = -1, i, usable;
375
376	_enter("");
377
378	bundle->alloc_error = 0;
379
380	/* See if there are any usable connections. */
381	usable = 0;
382	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
383		if (rxrpc_may_reuse_conn(bundle->conns[i]))
384			usable++;
385		else if (slot == -1)
386			slot = i;
387	}
388
389	if (!usable && bundle->upgrade)
390		bundle->try_upgrade = true;
391
392	if (!usable)
393		goto alloc_conn;
394
395	if (!bundle->avail_chans &&
396	    !bundle->try_upgrade &&
397	    usable < ARRAY_SIZE(bundle->conns))
398		goto alloc_conn;
399
400	_leave("");
401	return usable;
402
403alloc_conn:
404	return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false;
405}
406
407/*
408 * Assign a channel to the call at the front of the queue and wake the call up.
409 * We don't increment the callNumber counter until this number has been exposed
410 * to the world.
411 */
412static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
413				       unsigned int channel)
414{
415	struct rxrpc_channel *chan = &conn->channels[channel];
416	struct rxrpc_bundle *bundle = conn->bundle;
417	struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
418					     struct rxrpc_call, wait_link);
419	u32 call_id = chan->call_counter + 1;
420
421	_enter("C=%x,%u", conn->debug_id, channel);
422
423	list_del_init(&call->wait_link);
424
425	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
426
427	/* Cancel the final ACK on the previous call if it hasn't been sent yet
428	 * as the DATA packet will implicitly ACK it.
429	 */
430	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
431	clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);
432
433	rxrpc_see_call(call, rxrpc_call_see_activate_client);
434	call->conn	= rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
435	call->cid	= conn->proto.cid | channel;
436	call->call_id	= call_id;
437	call->dest_srx.srx_service = conn->service_id;
438	call->cong_ssthresh = call->peer->cong_ssthresh;
439	if (call->cong_cwnd >= call->cong_ssthresh)
440		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
441	else
442		call->cong_mode = RXRPC_CALL_SLOW_START;
443
444	chan->call_id		= call_id;
445	chan->call_debug_id	= call->debug_id;
446	chan->call		= call;
447
448	rxrpc_see_call(call, rxrpc_call_see_connected);
449	trace_rxrpc_connect_call(call);
450	call->tx_last_sent = ktime_get_real();
451	rxrpc_start_call_timer(call);
452	rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST);
453	wake_up(&call->waitq);
454}
455
456/*
457 * Remove a connection from the idle list if it's on it.
458 */
459static void rxrpc_unidle_conn(struct rxrpc_connection *conn)
460{
461	if (!list_empty(&conn->cache_link)) {
462		list_del_init(&conn->cache_link);
463		rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
464	}
465}
466
467/*
468 * Assign channels and callNumbers to waiting calls.
469 */
470static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
471{
472	struct rxrpc_connection *conn;
473	unsigned long avail, mask;
474	unsigned int channel, slot;
475
476	trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);
477
478	if (bundle->try_upgrade)
479		mask = 1;
480	else
481		mask = ULONG_MAX;
482
483	while (!list_empty(&bundle->waiting_calls)) {
484		avail = bundle->avail_chans & mask;
485		if (!avail)
486			break;
487		channel = __ffs(avail);
488		clear_bit(channel, &bundle->avail_chans);
489
490		slot = channel / RXRPC_MAXCALLS;
491		conn = bundle->conns[slot];
492		if (!conn)
493			break;
494
495		if (bundle->try_upgrade)
496			set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
497		rxrpc_unidle_conn(conn);
498
499		channel &= (RXRPC_MAXCALLS - 1);
500		conn->act_chans	|= 1 << channel;
501		rxrpc_activate_one_channel(conn, channel);
502	}
503}
504
505/*
506 * Connect waiting channels (called from the I/O thread).
507 */
508void rxrpc_connect_client_calls(struct rxrpc_local *local)
509{
510	struct rxrpc_call *call;
511
512	while ((call = list_first_entry_or_null(&local->new_client_calls,
513						struct rxrpc_call, wait_link))
514	       ) {
515		struct rxrpc_bundle *bundle = call->bundle;
516
517		spin_lock(&local->client_call_lock);
518		list_move_tail(&call->wait_link, &bundle->waiting_calls);
519		rxrpc_see_call(call, rxrpc_call_see_waiting_call);
520		spin_unlock(&local->client_call_lock);
521
522		if (rxrpc_bundle_has_space(bundle))
523			rxrpc_activate_channels(bundle);
524	}
525}
526
527/*
528 * Note that a call, and thus a connection, is about to be exposed to the
529 * world.
530 */
531void rxrpc_expose_client_call(struct rxrpc_call *call)
532{
533	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
534	struct rxrpc_connection *conn = call->conn;
535	struct rxrpc_channel *chan = &conn->channels[channel];
536
537	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
538		/* Mark the call ID as being used.  If the callNumber counter
539		 * exceeds ~2 billion, we kill the connection after its
540		 * outstanding calls have finished so that the counter doesn't
541		 * wrap.
542		 */
543		chan->call_counter++;
544		if (chan->call_counter >= INT_MAX)
545			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
546		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
547
548		spin_lock(&call->peer->lock);
549		hlist_add_head(&call->error_link, &call->peer->error_targets);
550		spin_unlock(&call->peer->lock);
551	}
552}
553
554/*
555 * Set the reap timer.
556 */
557static void rxrpc_set_client_reap_timer(struct rxrpc_local *local)
558{
559	if (!local->kill_all_client_conns) {
560		unsigned long now = jiffies;
561		unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
562
563		if (local->rxnet->live)
564			timer_reduce(&local->client_conn_reap_timer, reap_at);
565	}
566}
567
568/*
569 * Disconnect a client call.
570 */
571void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
572{
573	struct rxrpc_connection *conn;
574	struct rxrpc_channel *chan = NULL;
575	struct rxrpc_local *local = bundle->local;
576	unsigned int channel;
577	bool may_reuse;
578	u32 cid;
579
580	_enter("c=%x", call->debug_id);
581
582	/* Calls that have never actually been assigned a channel can simply be
583	 * discarded.
584	 */
585	conn = call->conn;
586	if (!conn) {
587		_debug("call is waiting");
588		ASSERTCMP(call->call_id, ==, 0);
589		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
590		/* May still be on ->new_client_calls. */
591		spin_lock(&local->client_call_lock);
592		list_del_init(&call->wait_link);
593		spin_unlock(&local->client_call_lock);
594		return;
595	}
596
597	cid = call->cid;
598	channel = cid & RXRPC_CHANNELMASK;
599	chan = &conn->channels[channel];
600	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
601
602	if (WARN_ON(chan->call != call))
603		return;
604
605	may_reuse = rxrpc_may_reuse_conn(conn);
606
607	/* If a client call was exposed to the world, we save the result for
608	 * retransmission.
609	 *
610	 * We use a barrier here so that the call number and abort code can be
611	 * read without needing to take a lock.
612	 *
613	 * TODO: Make the incoming packet handler check this and handle
614	 * terminal retransmission without requiring access to the call.
615	 */
616	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
617		_debug("exposed %u,%u", call->call_id, call->abort_code);
618		__rxrpc_disconnect_call(conn, call);
619
620		if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
621			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
622			bundle->try_upgrade = false;
623			if (may_reuse)
624				rxrpc_activate_channels(bundle);
625		}
626	}
627
628	/* See if we can pass the channel directly to another call. */
629	if (may_reuse && !list_empty(&bundle->waiting_calls)) {
630		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
631		rxrpc_activate_one_channel(conn, channel);
632		return;
633	}
634
635	/* Schedule the final ACK to be transmitted in a short while so that it
636	 * can be skipped if we find a follow-on call.  The first DATA packet
637	 * of the follow on call will implicitly ACK this call.
638	 */
639	if (call->completion == RXRPC_CALL_SUCCEEDED &&
640	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
641		unsigned long final_ack_at = jiffies + 2;
642
643		chan->final_ack_at = final_ack_at;
644		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
645		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
646		rxrpc_reduce_conn_timer(conn, final_ack_at);
647	}
648
649	/* Deactivate the channel. */
650	chan->call = NULL;
651	set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
652	conn->act_chans	&= ~(1 << channel);
653
654	/* If no channels remain active, then put the connection on the idle
655	 * list for a short while.  Give it a ref to stop it going away if it
656	 * becomes unbundled.
657	 */
658	if (!conn->act_chans) {
659		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
660		conn->idle_timestamp = jiffies;
661
662		rxrpc_get_connection(conn, rxrpc_conn_get_idle);
663		list_move_tail(&conn->cache_link, &local->idle_client_conns);
664
665		rxrpc_set_client_reap_timer(local);
666	}
667}
668
669/*
670 * Remove a connection from a bundle.
671 */
672static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
673{
674	struct rxrpc_bundle *bundle = conn->bundle;
675	unsigned int bindex;
676	int i;
677
678	_enter("C=%x", conn->debug_id);
679
680	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
681		rxrpc_process_delayed_final_acks(conn, true);
682
683	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
684	if (bundle->conns[bindex] == conn) {
685		_debug("clear slot %u", bindex);
686		bundle->conns[bindex] = NULL;
687		bundle->conn_ids[bindex] = 0;
688		for (i = 0; i < RXRPC_MAXCALLS; i++)
689			clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
690		rxrpc_put_client_connection_id(bundle->local, conn);
691		rxrpc_deactivate_bundle(bundle);
692		rxrpc_put_connection(conn, rxrpc_conn_put_unbundle);
693	}
694}
695
696/*
697 * Drop the active count on a bundle.
698 */
699void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
700{
701	struct rxrpc_local *local;
702	bool need_put = false;
703
704	if (!bundle)
705		return;
706
707	local = bundle->local;
708	if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
709		if (!bundle->exclusive) {
710			_debug("erase bundle");
711			rb_erase(&bundle->local_node, &local->client_bundles);
712			need_put = true;
713		}
714
715		spin_unlock(&local->client_bundles_lock);
716		if (need_put)
717			rxrpc_put_bundle(bundle, rxrpc_bundle_put_discard);
718	}
719}
720
721/*
722 * Clean up a dead client connection.
723 */
724void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
725{
726	struct rxrpc_local *local = conn->local;
727	struct rxrpc_net *rxnet = local->rxnet;
728
729	_enter("C=%x", conn->debug_id);
730
731	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
732	atomic_dec(&rxnet->nr_client_conns);
733
734	rxrpc_put_client_connection_id(local, conn);
735}
736
737/*
738 * Discard expired client connections from the idle list.  Each conn in the
739 * idle list has been exposed and holds an extra ref because of that.
740 *
741 * This may be called from conn setup or from a work item so cannot be
742 * considered non-reentrant.
743 */
744void rxrpc_discard_expired_client_conns(struct rxrpc_local *local)
745{
746	struct rxrpc_connection *conn;
747	unsigned long expiry, conn_expires_at, now;
748	unsigned int nr_conns;
749
750	_enter("");
751
752	/* We keep an estimate of what the number of conns ought to be after
753	 * we've discarded some so that we don't overdo the discarding.
754	 */
755	nr_conns = atomic_read(&local->rxnet->nr_client_conns);
756
757next:
758	conn = list_first_entry_or_null(&local->idle_client_conns,
759					struct rxrpc_connection, cache_link);
760	if (!conn)
761		return;
762
763	if (!local->kill_all_client_conns) {
764		/* If the number of connections is over the reap limit, we
765		 * expedite discard by reducing the expiry timeout.  We must,
766		 * however, have at least a short grace period to be able to do
767		 * final-ACK or ABORT retransmission.
768		 */
769		expiry = rxrpc_conn_idle_client_expiry;
770		if (nr_conns > rxrpc_reap_client_connections)
771			expiry = rxrpc_conn_idle_client_fast_expiry;
772		if (conn->local->service_closed)
773			expiry = rxrpc_closed_conn_expiry * HZ;
774
775		conn_expires_at = conn->idle_timestamp + expiry;
776
777		now = jiffies;
778		if (time_after(conn_expires_at, now))
779			goto not_yet_expired;
780	}
781
782	atomic_dec(&conn->active);
783	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
784	list_del_init(&conn->cache_link);
785
786	rxrpc_unbundle_conn(conn);
787	/* Drop the ->cache_link ref */
788	rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle);
789
790	nr_conns--;
791	goto next;
792
793not_yet_expired:
794	/* The connection at the front of the queue hasn't yet expired, so
795	 * schedule the work item for that point if we discarded something.
796	 *
797	 * We don't worry if the work item is already scheduled - it can look
798	 * after rescheduling itself at a later time.  We could cancel it, but
799	 * then things get messier.
800	 */
801	_debug("not yet");
802	if (!local->kill_all_client_conns)
803		timer_reduce(&local->client_conn_reap_timer, conn_expires_at);
804
805	_leave("");
806}
807
808/*
809 * Clean up the client connections on a local endpoint.
810 */
811void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
812{
813	struct rxrpc_connection *conn;
814
815	_enter("");
816
817	local->kill_all_client_conns = true;
818
819	del_timer_sync(&local->client_conn_reap_timer);
820
821	while ((conn = list_first_entry_or_null(&local->idle_client_conns,
822						struct rxrpc_connection, cache_link))) {
823		list_del_init(&conn->cache_link);
824		atomic_dec(&conn->active);
825		trace_rxrpc_client(conn, -1, rxrpc_client_discard);
826		rxrpc_unbundle_conn(conn);
827		rxrpc_put_connection(conn, rxrpc_conn_put_local_dead);
828	}
829
830	_leave(" [culled]");
831}
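
For reference, a self-contained sketch (the struct and function names are illustrative, not the kernel's) of the GNU C "a ?: b" chaining that rxrpc_look_up_bundle() uses in both versions to get a lexicographic ordering over (peer, key, security_level, upgrade) for the client_bundles rb-tree:

#include <stdio.h>

/* Illustrative stand-in for the bundle's lookup key, not the kernel struct. */
struct demo_bundle {
	void *peer;
	void *key;
	unsigned int security_level;
	int upgrade;
};

static long demo_cmp(const struct demo_bundle *a, const struct demo_bundle *b)
{
#define cmp(X, Y) ((long)(X) - (long)(Y))
	/* GNU C "x ?: y" returns x when x is non-zero, so the chain
	 * stops at the first field that differs.
	 */
	return (cmp(a->peer, b->peer) ?:
		cmp(a->key, b->key) ?:
		cmp(a->security_level, b->security_level) ?:
		cmp(a->upgrade, b->upgrade));
#undef cmp
}

int main(void)
{
	int peer, key;
	struct demo_bundle a = { &peer, &key, 1, 0 };
	struct demo_bundle b = { &peer, &key, 2, 0 };

	printf("a vs b: %ld\n", demo_cmp(&a, &b));	/* negative: differs at security_level */
	printf("a vs a: %ld\n", demo_cmp(&a, &a));	/* zero: parameters match */
	return 0;
}

A zero result means the call's parameters match an existing bundle, which is the case where the lookup takes the found_bundle path and shares the bundle rather than inserting a new node.
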